mirror of
https://github.com/ail-project/ail-framework.git
synced 2024-11-10 00:28:22 +00:00
chg: [kvrocks] launch + install
This commit is contained in:
parent
97c65b3626
commit
4660a8e88b
3 changed files with 806 additions and 93 deletions
152
bin/LAUNCH.sh
152
bin/LAUNCH.sh
|
@ -33,6 +33,7 @@ export PATH=$AIL_FLASK:$PATH
|
||||||
|
|
||||||
isredis=`screen -ls | egrep '[0-9]+.Redis_AIL' | cut -d. -f1`
|
isredis=`screen -ls | egrep '[0-9]+.Redis_AIL' | cut -d. -f1`
|
||||||
isardb=`screen -ls | egrep '[0-9]+.ARDB_AIL' | cut -d. -f1`
|
isardb=`screen -ls | egrep '[0-9]+.ARDB_AIL' | cut -d. -f1`
|
||||||
|
iskvrocks=`screen -ls | egrep '[0-9]+.KVROCKS_AIL' | cut -d. -f1`
|
||||||
islogged=`screen -ls | egrep '[0-9]+.Logging_AIL' | cut -d. -f1`
|
islogged=`screen -ls | egrep '[0-9]+.Logging_AIL' | cut -d. -f1`
|
||||||
isqueued=`screen -ls | egrep '[0-9]+.Queue_AIL' | cut -d. -f1`
|
isqueued=`screen -ls | egrep '[0-9]+.Queue_AIL' | cut -d. -f1`
|
||||||
is_ail_core=`screen -ls | egrep '[0-9]+.Core_AIL' | cut -d. -f1`
|
is_ail_core=`screen -ls | egrep '[0-9]+.Core_AIL' | cut -d. -f1`
|
||||||
|
@ -61,6 +62,7 @@ function helptext {
|
||||||
- All the processing modules.
|
- All the processing modules.
|
||||||
- All Redis in memory servers.
|
- All Redis in memory servers.
|
||||||
- All ARDB on disk servers.
|
- All ARDB on disk servers.
|
||||||
|
- All KVROCKS servers.
|
||||||
"$DEFAULT"
|
"$DEFAULT"
|
||||||
(Inside screen Daemons)
|
(Inside screen Daemons)
|
||||||
"$DEFAULT"
|
"$DEFAULT"
|
||||||
|
@ -104,6 +106,17 @@ function launching_ardb {
|
||||||
screen -S "ARDB_AIL" -X screen -t "6382" bash -c 'cd '${AIL_HOME}'; ardb-server '$conf_dir'6382.conf ; read x'
|
screen -S "ARDB_AIL" -X screen -t "6382" bash -c 'cd '${AIL_HOME}'; ardb-server '$conf_dir'6382.conf ; read x'
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function launching_kvrocks {
|
||||||
|
conf_dir="${AIL_HOME}/configs"
|
||||||
|
|
||||||
|
screen -dmS "KVROCKS_AIL"
|
||||||
|
sleep 0.1
|
||||||
|
echo -e $GREEN"\t* Launching KVROCKS servers"$DEFAULT
|
||||||
|
|
||||||
|
sleep 0.1
|
||||||
|
screen -S "KVROCKS_AIL" -X screen -t "6383" bash -c 'cd '${AIL_HOME}'; ./kvrocks/build/kvrocks -c '$conf_dir'/6383.conf ; read x'
|
||||||
|
}
|
||||||
|
|
||||||
function launching_logs {
|
function launching_logs {
|
||||||
screen -dmS "Logging_AIL"
|
screen -dmS "Logging_AIL"
|
||||||
sleep 0.1
|
sleep 0.1
|
||||||
|
@ -277,56 +290,65 @@ function launching_scripts {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function shutting_down_redis_servers {
|
||||||
|
array=("$@")
|
||||||
|
redis_dir=${AIL_HOME}/redis/src
|
||||||
|
for port in "${array[@]}";
|
||||||
|
do
|
||||||
|
bash -c "${redis_dir}/redis-cli -p ${port} SHUTDOWN"
|
||||||
|
sleep 0.1
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
function shutting_down_redis {
|
function shutting_down_redis {
|
||||||
redis_dir=${AIL_HOME}/redis/src/
|
ports=("6379" "6380" "6381")
|
||||||
bash -c $redis_dir'redis-cli -p 6379 SHUTDOWN'
|
shutting_down_redis_servers "${ports[@]}"
|
||||||
sleep 0.1
|
|
||||||
bash -c $redis_dir'redis-cli -p 6380 SHUTDOWN'
|
|
||||||
sleep 0.1
|
|
||||||
bash -c $redis_dir'redis-cli -p 6381 SHUTDOWN'
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function shutting_down_ardb {
|
function shutting_down_ardb {
|
||||||
redis_dir=${AIL_HOME}/redis/src/
|
ports=("6382")
|
||||||
bash -c $redis_dir'redis-cli -p 6382 SHUTDOWN'
|
shutting_down_redis_servers "${ports[@]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
function shutting_down_kvrocks {
|
||||||
|
ports=("6383")
|
||||||
|
shutting_down_redis_servers "${ports[@]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
function checking_redis_servers {
|
||||||
|
db_name=$1
|
||||||
|
shift
|
||||||
|
array=("$@")
|
||||||
|
redis_dir="${AIL_HOME}/redis/src"
|
||||||
|
flag_db=0
|
||||||
|
for port in "${array[@]}";
|
||||||
|
do
|
||||||
|
sleep 0.2
|
||||||
|
bash -c "${redis_dir}/redis-cli -p ${port} PING | grep "PONG" &> /dev/null"
|
||||||
|
if [ ! $? == 0 ]; then
|
||||||
|
echo -e "${RED}\t${port} ${db_name} not ready${DEFAULT}"
|
||||||
|
flag_db=1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
return $flag_db;
|
||||||
}
|
}
|
||||||
|
|
||||||
function checking_redis {
|
function checking_redis {
|
||||||
flag_redis=0
|
ports=("6379" "6380" "6381")
|
||||||
redis_dir=${AIL_HOME}/redis/src/
|
checking_redis_servers "Redis" "${ports[@]}"
|
||||||
bash -c $redis_dir'redis-cli -p 6379 PING | grep "PONG" &> /dev/null'
|
return $?
|
||||||
if [ ! $? == 0 ]; then
|
|
||||||
echo -e $RED"\t6379 not ready"$DEFAULT
|
|
||||||
flag_redis=1
|
|
||||||
fi
|
|
||||||
sleep 0.1
|
|
||||||
bash -c $redis_dir'redis-cli -p 6380 PING | grep "PONG" &> /dev/null'
|
|
||||||
if [ ! $? == 0 ]; then
|
|
||||||
echo -e $RED"\t6380 not ready"$DEFAULT
|
|
||||||
flag_redis=1
|
|
||||||
fi
|
|
||||||
sleep 0.1
|
|
||||||
bash -c $redis_dir'redis-cli -p 6381 PING | grep "PONG" &> /dev/null'
|
|
||||||
if [ ! $? == 0 ]; then
|
|
||||||
echo -e $RED"\t6381 not ready"$DEFAULT
|
|
||||||
flag_redis=1
|
|
||||||
fi
|
|
||||||
sleep 0.1
|
|
||||||
|
|
||||||
return $flag_redis;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function checking_ardb {
|
function checking_ardb {
|
||||||
flag_ardb=0
|
ports=("6382")
|
||||||
redis_dir=${AIL_HOME}/redis/src/
|
checking_redis_servers "ARDB" "${ports[@]}"
|
||||||
sleep 0.2
|
return $?
|
||||||
bash -c $redis_dir'redis-cli -p 6382 PING | grep "PONG" &> /dev/null'
|
}
|
||||||
if [ ! $? == 0 ]; then
|
|
||||||
echo -e $RED"\t6382 ARDB not ready"$DEFAULT
|
|
||||||
flag_ardb=1
|
|
||||||
fi
|
|
||||||
|
|
||||||
return $flag_ardb;
|
function checking_kvrocks {
|
||||||
|
ports=("6383")
|
||||||
|
checking_redis_servers "KVROCKS" "${ports[@]}"
|
||||||
|
return $?
|
||||||
}
|
}
|
||||||
|
|
||||||
function wait_until_redis_is_ready {
|
function wait_until_redis_is_ready {
|
||||||
|
@ -353,6 +375,18 @@ function wait_until_ardb_is_ready {
|
||||||
echo -e $YELLOW"\t* ARDB Launched"$DEFAULT
|
echo -e $YELLOW"\t* ARDB Launched"$DEFAULT
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function wait_until_kvrocks_is_ready {
|
||||||
|
not_ready=true;
|
||||||
|
while $not_ready; do
|
||||||
|
if checking_kvrocks; then
|
||||||
|
not_ready=false
|
||||||
|
else
|
||||||
|
sleep 3
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
echo -e $YELLOW"\t* KVROCKS Launched"$DEFAULT
|
||||||
|
}
|
||||||
|
|
||||||
function launch_redis {
|
function launch_redis {
|
||||||
if [[ ! $isredis ]]; then
|
if [[ ! $isredis ]]; then
|
||||||
launching_redis;
|
launching_redis;
|
||||||
|
@ -369,6 +403,14 @@ function launch_ardb {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function launch_kvrocks {
|
||||||
|
if [[ ! $iskvrocks ]]; then
|
||||||
|
launching_kvrocks;
|
||||||
|
else
|
||||||
|
echo -e $RED"\t* A screen is already launched"$DEFAULT
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
function launch_logs {
|
function launch_logs {
|
||||||
if [[ ! $islogged ]]; then
|
if [[ ! $islogged ]]; then
|
||||||
launching_logs;
|
launching_logs;
|
||||||
|
@ -388,14 +430,14 @@ function launch_queues {
|
||||||
function launch_scripts {
|
function launch_scripts {
|
||||||
if [[ ! $isscripted ]]; then ############################# is core
|
if [[ ! $isscripted ]]; then ############################# is core
|
||||||
sleep 1
|
sleep 1
|
||||||
if checking_ardb && checking_redis; then
|
if checking_ardb && checking_redis && checking_kvrocks; then
|
||||||
launching_scripts;
|
launching_scripts;
|
||||||
else
|
else
|
||||||
no_script_launched=true
|
no_script_launched=true
|
||||||
while $no_script_launched; do
|
while $no_script_launched; do
|
||||||
echo -e $YELLOW"\tScript not started, waiting 5 more secondes"$DEFAULT
|
echo -e $YELLOW"\tScript not started, waiting 5 more secondes"$DEFAULT
|
||||||
sleep 5
|
sleep 5
|
||||||
if checking_redis && checking_ardb; then
|
if checking_redis && checking_ardb && checking_kvrocks; then
|
||||||
launching_scripts;
|
launching_scripts;
|
||||||
no_script_launched=false
|
no_script_launched=false
|
||||||
else
|
else
|
||||||
|
@ -446,7 +488,7 @@ function killscript {
|
||||||
}
|
}
|
||||||
|
|
||||||
function killall {
|
function killall {
|
||||||
if [[ $isredis || $isardb || $islogged || $isqueued || $is_ail_2_ail || $isscripted || $isflasked || $isfeeded || $iscrawler || $is_ail_core ]]; then
|
if [[ $isredis || $isardb || $iskvrocks || $islogged || $isqueued || $is_ail_2_ail || $isscripted || $isflasked || $isfeeded || $iscrawler || $is_ail_core || $is_ail_2_ail ]]; then
|
||||||
if [[ $isredis ]]; then
|
if [[ $isredis ]]; then
|
||||||
echo -e $GREEN"Gracefully closing redis servers"$DEFAULT
|
echo -e $GREEN"Gracefully closing redis servers"$DEFAULT
|
||||||
shutting_down_redis;
|
shutting_down_redis;
|
||||||
|
@ -456,20 +498,20 @@ function killall {
|
||||||
echo -e $GREEN"Gracefully closing ardb servers"$DEFAULT
|
echo -e $GREEN"Gracefully closing ardb servers"$DEFAULT
|
||||||
shutting_down_ardb;
|
shutting_down_ardb;
|
||||||
fi
|
fi
|
||||||
|
if [[ $iskvrocks ]]; then
|
||||||
|
echo -e $GREEN"Gracefully closing Kvrocks servers"$DEFAULT
|
||||||
|
shutting_down_kvrocks;
|
||||||
|
fi
|
||||||
echo -e $GREEN"Killing all"$DEFAULT
|
echo -e $GREEN"Killing all"$DEFAULT
|
||||||
kill $isredis $isardb $islogged $isqueued $is_ail_core $isscripted $isflasked $isfeeded $iscrawler $is_ail_2_ail
|
kill $isredis $isardb $iskvrocks $islogged $isqueued $is_ail_core $isscripted $isflasked $isfeeded $iscrawler $is_ail_2_ail
|
||||||
sleep 0.2
|
sleep 0.2
|
||||||
echo -e $ROSE`screen -ls`$DEFAULT
|
echo -e $ROSE`screen -ls`$DEFAULT
|
||||||
echo -e $GREEN"\t* $isredis $isardb $islogged $isqueued $isscripted $is_ail_2_ail $isflasked $isfeeded $iscrawler $is_ail_core killed."$DEFAULT
|
echo -e $GREEN"\t* $isredis $isardb $iskvrocks $islogged $isqueued $isscripted $is_ail_2_ail $isflasked $isfeeded $iscrawler $is_ail_core killed."$DEFAULT
|
||||||
else
|
else
|
||||||
echo -e $RED"\t* No screen to kill"$DEFAULT
|
echo -e $RED"\t* No screen to kill"$DEFAULT
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
function shutdown {
|
|
||||||
bash -c "./Shutdown.py"
|
|
||||||
}
|
|
||||||
|
|
||||||
function update() {
|
function update() {
|
||||||
bin_dir=${AIL_HOME}/bin
|
bin_dir=${AIL_HOME}/bin
|
||||||
|
|
||||||
|
@ -526,6 +568,7 @@ function launch_all {
|
||||||
update;
|
update;
|
||||||
launch_redis;
|
launch_redis;
|
||||||
launch_ardb;
|
launch_ardb;
|
||||||
|
launch_kvrocks;
|
||||||
launch_logs;
|
launch_logs;
|
||||||
launch_queues;
|
launch_queues;
|
||||||
launch_scripts;
|
launch_scripts;
|
||||||
|
@ -534,7 +577,7 @@ function launch_all {
|
||||||
|
|
||||||
function menu_display {
|
function menu_display {
|
||||||
|
|
||||||
options=("Redis" "Ardb" "Logs" "Queues" "Scripts" "Flask" "Killall" "Shutdown" "Update" "Update-config" "Update-thirdparty")
|
options=("Redis" "Ardb" "Kvrocks" "Logs" "Queues" "Scripts" "Flask" "Killall" "Update" "Update-config" "Update-thirdparty")
|
||||||
|
|
||||||
menu() {
|
menu() {
|
||||||
echo "What do you want to Launch?:"
|
echo "What do you want to Launch?:"
|
||||||
|
@ -565,6 +608,9 @@ function menu_display {
|
||||||
Ardb)
|
Ardb)
|
||||||
launch_ardb;
|
launch_ardb;
|
||||||
;;
|
;;
|
||||||
|
Kvrocks)
|
||||||
|
launch_kvrocks;
|
||||||
|
;;
|
||||||
Logs)
|
Logs)
|
||||||
launch_logs;
|
launch_logs;
|
||||||
;;
|
;;
|
||||||
|
@ -580,9 +626,6 @@ function menu_display {
|
||||||
Killall)
|
Killall)
|
||||||
killall;
|
killall;
|
||||||
;;
|
;;
|
||||||
Shutdown)
|
|
||||||
shutdown;
|
|
||||||
;;
|
|
||||||
Update)
|
Update)
|
||||||
checking_configuration;
|
checking_configuration;
|
||||||
update;
|
update;
|
||||||
|
@ -618,12 +661,17 @@ while [ "$1" != "" ]; do
|
||||||
;;
|
;;
|
||||||
-la | --launchARDB ) launch_ardb;
|
-la | --launchARDB ) launch_ardb;
|
||||||
;;
|
;;
|
||||||
|
-lk | --launchKVROCKS ) launch_kvrocks;
|
||||||
|
;;
|
||||||
-lrv | --launchRedisVerify ) launch_redis;
|
-lrv | --launchRedisVerify ) launch_redis;
|
||||||
wait_until_redis_is_ready;
|
wait_until_redis_is_ready;
|
||||||
;;
|
;;
|
||||||
-lav | --launchARDBVerify ) launch_ardb;
|
-lav | --launchARDBVerify ) launch_ardb;
|
||||||
wait_until_ardb_is_ready;
|
wait_until_ardb_is_ready;
|
||||||
;;
|
;;
|
||||||
|
-lkv | --launchKVORCKSVerify ) launch_kvrocks;
|
||||||
|
wait_until_kvrocks_is_ready;
|
||||||
|
;;
|
||||||
-k | --killAll ) killall;
|
-k | --killAll ) killall;
|
||||||
;;
|
;;
|
||||||
-ks | --killscript ) killscript;
|
-ks | --killscript ) killscript;
|
||||||
|
|
650
configs/6383.conf
Normal file
650
configs/6383.conf
Normal file
|
@ -0,0 +1,650 @@
|
||||||
|
################################ GENERAL #####################################
|
||||||
|
|
||||||
|
# By default kvrocks listens for connections from all the network interfaces
|
||||||
|
# available on the server. It is possible to listen to just one or multiple
|
||||||
|
# interfaces using the "bind" configuration directive, followed by one or
|
||||||
|
# more IP addresses.
|
||||||
|
#
|
||||||
|
# Examples:
|
||||||
|
#
|
||||||
|
# bind 192.168.1.100 10.0.0.1
|
||||||
|
# bind 127.0.0.1 ::1
|
||||||
|
bind 127.0.0.1
|
||||||
|
|
||||||
|
# Unix socket.
|
||||||
|
#
|
||||||
|
# Specify the path for the unix socket that will be used to listen for
|
||||||
|
# incoming connections. There is no default, so kvrocks will not listen
|
||||||
|
# on a unix socket when not specified.
|
||||||
|
#
|
||||||
|
# unixsocket /tmp/kvrocks.sock
|
||||||
|
# unixsocketperm 777
|
||||||
|
|
||||||
|
# Accept connections on the specified port, default is 6666.
|
||||||
|
port 6383
|
||||||
|
|
||||||
|
# Close the connection after a client is idle for N seconds (0 to disable)
|
||||||
|
timeout 0
|
||||||
|
|
||||||
|
# The number of worker's threads, increase or decrease it would effect the performance.
|
||||||
|
workers 8
|
||||||
|
|
||||||
|
# By default kvrocks does not run as a daemon. Use 'yes' if you need it.
|
||||||
|
# Note that kvrocks will write a pid file in /var/run/kvrocks.pid when daemonized.
|
||||||
|
daemonize no
|
||||||
|
|
||||||
|
# Kvrocks implements cluster solution that is similar with redis cluster solution.
|
||||||
|
# You can get cluster information by CLUSTER NODES|SLOTS|INFO command, it also is
|
||||||
|
# adapted to redis-cli, redis-benchmark, redis cluster SDK and redis cluster proxy.
|
||||||
|
# But kvrocks doesn't support to communicate with each others, so you must set
|
||||||
|
# cluster topology by CLUSTER SETNODES|SETNODEID commands, more details: #219.
|
||||||
|
#
|
||||||
|
# PLEASE NOTE:
|
||||||
|
# If you enable cluster, kvrocks will encode key with its slot id calculated by
|
||||||
|
# CRC16 and modulo 16384, endoding key with its slot id makes it efficient to
|
||||||
|
# migrate keys based on slot. So if you enabled at first time, cluster mode must
|
||||||
|
# not be disabled after restarting, and vice versa. That is to say, data is not
|
||||||
|
# compatible between standalone mode with cluster mode, you must migrate data
|
||||||
|
# if you want to change mode, otherwise, kvrocks will make data corrupt.
|
||||||
|
#
|
||||||
|
# Default: no
|
||||||
|
cluster-enabled no
|
||||||
|
|
||||||
|
# Set the max number of connected clients at the same time. By default
|
||||||
|
# this limit is set to 10000 clients, however if the server is not
|
||||||
|
# able to configure the process file limit to allow for the specified limit
|
||||||
|
# the max number of allowed clients is set to the current file limit
|
||||||
|
#
|
||||||
|
# Once the limit is reached the server will close all the new connections sending
|
||||||
|
# an error 'max number of clients reached'.
|
||||||
|
#
|
||||||
|
maxclients 10000
|
||||||
|
|
||||||
|
# Require clients to issue AUTH <PASSWORD> before processing any other
|
||||||
|
# commands. This might be useful in environments in which you do not trust
|
||||||
|
# others with access to the host running kvrocks.
|
||||||
|
#
|
||||||
|
# This should stay commented out for backward compatibility and because most
|
||||||
|
# people do not need auth (e.g. they run their own servers).
|
||||||
|
#
|
||||||
|
# Warning: since kvrocks is pretty fast an outside user can try up to
|
||||||
|
# 150k passwords per second against a good box. This means that you should
|
||||||
|
# use a very strong password otherwise it will be very easy to break.
|
||||||
|
#
|
||||||
|
# requirepass foobared
|
||||||
|
|
||||||
|
# If the master is password protected (using the "masterauth" configuration
|
||||||
|
# directive below) it is possible to tell the slave to authenticate before
|
||||||
|
# starting the replication synchronization process, otherwise the master will
|
||||||
|
# refuse the slave request.
|
||||||
|
#
|
||||||
|
# masterauth foobared
|
||||||
|
|
||||||
|
# Master-Salve replication would check db name is matched. if not, the slave should
|
||||||
|
# refuse to sync the db from master. Don't use default value, set the db-name to identify
|
||||||
|
# the cluster.
|
||||||
|
db-name change.me.db
|
||||||
|
|
||||||
|
# The working directory
|
||||||
|
#
|
||||||
|
# The DB will be written inside this directory
|
||||||
|
# Note that you must specify a directory here, not a file name.
|
||||||
|
dir /home/aurelien/git/ail-framework/DATA_KVROCKS
|
||||||
|
|
||||||
|
# You can configure where to store your server logs by the log-dir.
|
||||||
|
# If you don't specify one, we will use the above `dir` as our default log directory.
|
||||||
|
# We also can send logs to stdout/stderr is as simple as:
|
||||||
|
#
|
||||||
|
# log-dir stdout
|
||||||
|
|
||||||
|
# When running daemonized, kvrocks writes a pid file in ${CONFIG_DIR}/kvrocks.pid by
|
||||||
|
# default. You can specify a custom pid file location here.
|
||||||
|
# pidfile /var/run/kvrocks.pid
|
||||||
|
pidfile ""
|
||||||
|
|
||||||
|
# You can configure a slave instance to accept writes or not. Writing against
|
||||||
|
# a slave instance may be useful to store some ephemeral data (because data
|
||||||
|
# written on a slave will be easily deleted after resync with the master) but
|
||||||
|
# may also cause problems if clients are writing to it because of a
|
||||||
|
# misconfiguration.
|
||||||
|
slave-read-only yes
|
||||||
|
|
||||||
|
# The slave priority is an integer number published by Kvrocks in the INFO output.
|
||||||
|
# It is used by Redis Sentinel in order to select a slave to promote into a
|
||||||
|
# master if the master is no longer working correctly.
|
||||||
|
#
|
||||||
|
# A slave with a low priority number is considered better for promotion, so
|
||||||
|
# for instance if there are three slave with priority 10, 100, 25 Sentinel will
|
||||||
|
# pick the one with priority 10, that is the lowest.
|
||||||
|
#
|
||||||
|
# However a special priority of 0 marks the replica as not able to perform the
|
||||||
|
# role of master, so a slave with priority of 0 will never be selected by
|
||||||
|
# Redis Sentinel for promotion.
|
||||||
|
#
|
||||||
|
# By default the priority is 100.
|
||||||
|
slave-priority 100
|
||||||
|
|
||||||
|
# TCP listen() backlog.
|
||||||
|
#
|
||||||
|
# In high requests-per-second environments you need an high backlog in order
|
||||||
|
# to avoid slow clients connections issues. Note that the Linux kernel
|
||||||
|
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
|
||||||
|
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
|
||||||
|
# in order to Get the desired effect.
|
||||||
|
tcp-backlog 511
|
||||||
|
|
||||||
|
# If the master is an old version, it may have specified replication threads
|
||||||
|
# that use 'port + 1' as listening port, but in new versions, we don't use
|
||||||
|
# extra port to implement replication. In order to allow the new replicas to
|
||||||
|
# copy old masters, you should indicate that the master uses replication port
|
||||||
|
# or not.
|
||||||
|
# If yes, that indicates master uses replication port and replicas will connect
|
||||||
|
# to 'master's listening port + 1' when synchronization.
|
||||||
|
# If no, that indicates master doesn't use replication port and replicas will
|
||||||
|
# connect 'master's listening port' when synchronization.
|
||||||
|
master-use-repl-port no
|
||||||
|
|
||||||
|
# Currently, master only checks sequence number when replica asks for PSYNC,
|
||||||
|
# that is not enough since they may have different replication history even
|
||||||
|
# the replica asking sequence is in the range of the master current WAL.
|
||||||
|
#
|
||||||
|
# We design 'Replication Sequence ID' PSYNC, we add unique replication id for
|
||||||
|
# every write batch (the operation of each command on the storage engine), so
|
||||||
|
# the combination of replication id and sequence is unique for write batch.
|
||||||
|
# The master can identify whether the replica has the same replication history
|
||||||
|
# by checking replication id and sequence.
|
||||||
|
#
|
||||||
|
# By default, it is not enabled since this stricter check may easily lead to
|
||||||
|
# full synchronization.
|
||||||
|
use-rsid-psync no
|
||||||
|
|
||||||
|
# Master-Slave replication. Use slaveof to make a kvrocks instance a copy of
|
||||||
|
# another kvrocks server. A few things to understand ASAP about kvrocks replication.
|
||||||
|
#
|
||||||
|
# 1) Kvrocks replication is asynchronous, but you can configure a master to
|
||||||
|
# stop accepting writes if it appears to be not connected with at least
|
||||||
|
# a given number of slaves.
|
||||||
|
# 2) Kvrocks slaves are able to perform a partial resynchronization with the
|
||||||
|
# master if the replication link is lost for a relatively small amount of
|
||||||
|
# time. You may want to configure the replication backlog size (see the next
|
||||||
|
# sections of this file) with a sensible value depending on your needs.
|
||||||
|
# 3) Replication is automatic and does not need user intervention. After a
|
||||||
|
# network partition slaves automatically try to reconnect to masters
|
||||||
|
# and resynchronize with them.
|
||||||
|
#
|
||||||
|
# slaveof <masterip> <masterport>
|
||||||
|
# slaveof 127.0.0.1 6379
|
||||||
|
|
||||||
|
# When a slave loses its connection with the master, or when the replication
|
||||||
|
# is still in progress, the slave can act in two different ways:
|
||||||
|
#
|
||||||
|
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
|
||||||
|
# still reply to client requests, possibly with out of date data, or the
|
||||||
|
# data set may just be empty if this is the first synchronization.
|
||||||
|
#
|
||||||
|
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
|
||||||
|
# an error "SYNC with master in progress" to all the kind of commands
|
||||||
|
# but to INFO and SLAVEOF.
|
||||||
|
#
|
||||||
|
slave-serve-stale-data yes
|
||||||
|
|
||||||
|
# To guarantee slave's data safe and serve when it is in full synchronization
|
||||||
|
# state, slave still keep itself data. But this way needs to occupy much disk
|
||||||
|
# space, so we provide a way to reduce disk occupation, slave will delete itself
|
||||||
|
# entire database before fetching files from master during full synchronization.
|
||||||
|
# If you want to enable this way, you can set 'slave-delete-db-before-fullsync'
|
||||||
|
# to yes, but you must know that database will be lost if master is down during
|
||||||
|
# full synchronization, unless you have a backup of database.
|
||||||
|
#
|
||||||
|
# This option is similar redis replicas RDB diskless load option:
|
||||||
|
# repl-diskless-load on-empty-db
|
||||||
|
#
|
||||||
|
# Default: no
|
||||||
|
slave-empty-db-before-fullsync no
|
||||||
|
|
||||||
|
# If replicas need full synchronization with master, master need to create
|
||||||
|
# checkpoint for feeding replicas, and replicas also stage a checkpoint of
|
||||||
|
# the master. If we also keep the backup, it maybe occupy extra disk space.
|
||||||
|
# You can enable 'purge-backup-on-fullsync' if disk is not sufficient, but
|
||||||
|
# that may cause remote backup copy failing.
|
||||||
|
#
|
||||||
|
# Default: no
|
||||||
|
purge-backup-on-fullsync no
|
||||||
|
|
||||||
|
# The maximum allowed rate (in MB/s) that should be used by Replication.
|
||||||
|
# If the rate exceeds max-replication-mb, replication will slow down.
|
||||||
|
# Default: 0 (i.e. no limit)
|
||||||
|
max-replication-mb 0
|
||||||
|
|
||||||
|
# The maximum allowed aggregated write rate of flush and compaction (in MB/s).
|
||||||
|
# If the rate exceeds max-io-mb, io will slow down.
|
||||||
|
# 0 is no limit
|
||||||
|
# Default: 500
|
||||||
|
max-io-mb 500
|
||||||
|
|
||||||
|
# The maximum allowed space (in GB) that should be used by RocksDB.
|
||||||
|
# If the total size of the SST files exceeds max_allowed_space, writes to RocksDB will fail.
|
||||||
|
# Please see: https://github.com/facebook/rocksdb/wiki/Managing-Disk-Space-Utilization
|
||||||
|
# Default: 0 (i.e. no limit)
|
||||||
|
max-db-size 0
|
||||||
|
|
||||||
|
# The maximum backup to keep, server cron would run every minutes to check the num of current
|
||||||
|
# backup, and purge the old backup if exceed the max backup num to keep. If max-backup-to-keep
|
||||||
|
# is 0, no backup would be keep. But now, we only support 0 or 1.
|
||||||
|
max-backup-to-keep 1
|
||||||
|
|
||||||
|
# The maximum hours to keep the backup. If max-backup-keep-hours is 0, wouldn't purge any backup.
|
||||||
|
# default: 1 day
|
||||||
|
max-backup-keep-hours 24
|
||||||
|
|
||||||
|
# max-bitmap-to-string-mb use to limit the max size of bitmap to string transformation(MB).
|
||||||
|
#
|
||||||
|
# Default: 16
|
||||||
|
max-bitmap-to-string-mb 16
|
||||||
|
|
||||||
|
################################## SLOW LOG ###################################
|
||||||
|
|
||||||
|
# The Kvrocks Slow Log is a mechanism to log queries that exceeded a specified
|
||||||
|
# execution time. The execution time does not include the I/O operations
|
||||||
|
# like talking with the client, sending the reply and so forth,
|
||||||
|
# but just the time needed to actually execute the command (this is the only
|
||||||
|
# stage of command execution where the thread is blocked and can not serve
|
||||||
|
# other requests in the meantime).
|
||||||
|
#
|
||||||
|
# You can configure the slow log with two parameters: one tells Kvrocks
|
||||||
|
# what is the execution time, in microseconds, to exceed in order for the
|
||||||
|
# command to get logged, and the other parameter is the length of the
|
||||||
|
# slow log. When a new command is logged the oldest one is removed from the
|
||||||
|
# queue of logged commands.
|
||||||
|
|
||||||
|
# The following time is expressed in microseconds, so 1000000 is equivalent
|
||||||
|
# to one second. Note that -1 value disables the slow log, while
|
||||||
|
# a value of zero forces the logging of every command.
|
||||||
|
slowlog-log-slower-than 100000
|
||||||
|
|
||||||
|
# There is no limit to this length. Just be aware that it will consume memory.
|
||||||
|
# You can reclaim memory used by the slow log with SLOWLOG RESET.
|
||||||
|
slowlog-max-len 128
|
||||||
|
|
||||||
|
# If you run kvrocks from upstart or systemd, kvrocks can interact with your
|
||||||
|
# supervision tree. Options:
|
||||||
|
# supervised no - no supervision interaction
|
||||||
|
# supervised upstart - signal upstart by putting kvrocks into SIGSTOP mode
|
||||||
|
# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
|
||||||
|
# supervised auto - detect upstart or systemd method based on
|
||||||
|
# UPSTART_JOB or NOTIFY_SOCKET environment variables
|
||||||
|
# Note: these supervision methods only signal "process is ready."
|
||||||
|
# They do not enable continuous liveness pings back to your supervisor.
|
||||||
|
supervised no
|
||||||
|
|
||||||
|
################################## PERF LOG ###################################
|
||||||
|
|
||||||
|
# The Kvrocks Perf Log is a mechanism to log queries' performance context that
|
||||||
|
# exceeded a specified execution time. This mechanism uses rocksdb's
|
||||||
|
# Perf Context and IO Stats Context, Please see:
|
||||||
|
# https://github.com/facebook/rocksdb/wiki/Perf-Context-and-IO-Stats-Context
|
||||||
|
#
|
||||||
|
# This mechanism is enabled when profiling-sample-commands is not empty and
|
||||||
|
# profiling-sample-ratio greater than 0.
|
||||||
|
# It is important to note that this mechanism affects performance, but it is
|
||||||
|
# useful for troubleshooting performance bottlenecks, so it should only be
|
||||||
|
# enabled when performance problems occur.
|
||||||
|
|
||||||
|
# The name of the commands you want to record. Must be original name of
|
||||||
|
# commands supported by Kvrocks. Use ',' to separate multiple commands and
|
||||||
|
# use '*' to record all commands supported by Kvrocks.
|
||||||
|
# Example:
|
||||||
|
# - Single command: profiling-sample-commands get
|
||||||
|
# - Multiple commands: profiling-sample-commands get,mget,hget
|
||||||
|
#
|
||||||
|
# Default: empty
|
||||||
|
# profiling-sample-commands ""
|
||||||
|
|
||||||
|
# Ratio of the samples would be recorded. We simply use the rand to determine
|
||||||
|
# whether to record the sample or not.
|
||||||
|
#
|
||||||
|
# Default: 0
|
||||||
|
profiling-sample-ratio 0
|
||||||
|
|
||||||
|
# There is no limit to this length. Just be aware that it will consume memory.
|
||||||
|
# You can reclaim memory used by the perf log with PERFLOG RESET.
|
||||||
|
#
|
||||||
|
# Default: 256
|
||||||
|
profiling-sample-record-max-len 256
|
||||||
|
|
||||||
|
# profiling-sample-record-threshold-ms use to tell the kvrocks when to record.
|
||||||
|
#
|
||||||
|
# Default: 100 millisecond
|
||||||
|
profiling-sample-record-threshold-ms 100
|
||||||
|
|
||||||
|
################################## CRON ###################################
|
||||||
|
|
||||||
|
# Compact Scheduler, auto compact at schedule time
|
||||||
|
# time expression format is the same as crontab(currently only support * and int)
|
||||||
|
# e.g. compact-cron 0 3 * * * 0 4 * * *
|
||||||
|
# would compact the db at 3am and 4am everyday
|
||||||
|
# compact-cron 0 3 * * *
|
||||||
|
|
||||||
|
# The hour range that compaction checker would be active
|
||||||
|
# e.g. compaction-checker-range 0-7 means compaction checker would be worker between
|
||||||
|
# 0-7am every day.
|
||||||
|
compaction-checker-range 0-7
|
||||||
|
|
||||||
|
# Bgsave scheduler, auto bgsave at schedule time
|
||||||
|
# time expression format is the same as crontab(currently only support * and int)
|
||||||
|
# e.g. bgsave-cron 0 3 * * * 0 4 * * *
|
||||||
|
# would bgsave the db at 3am and 4am everyday
|
||||||
|
|
||||||
|
# Command renaming.
|
||||||
|
#
|
||||||
|
# It is possible to change the name of dangerous commands in a shared
|
||||||
|
# environment. For instance the KEYS command may be renamed into something
|
||||||
|
# hard to guess so that it will still be available for internal-use tools
|
||||||
|
# but not available for general clients.
|
||||||
|
#
|
||||||
|
# Example:
|
||||||
|
#
|
||||||
|
# rename-command KEYS b840fc02d524045429941cc15f59e41cb7be6c52
|
||||||
|
#
|
||||||
|
# It is also possible to completely kill a command by renaming it into
|
||||||
|
# an empty string:
|
||||||
|
#
|
||||||
|
# rename-command KEYS ""
|
||||||
|
|
||||||
|
# The key-value size may so be quite different in many scenes, and use 256MiB as SST file size
|
||||||
|
# may cause data loading(large index/filter block) ineffective when the key-value was too small.
|
||||||
|
# kvrocks supports user-defined SST file in config(rocksdb.target_file_size_base),
|
||||||
|
# but it still too trivial and inconvenient to adjust the different sizes for different instances.
|
||||||
|
# so we want to periodic auto-adjust the SST size in-flight with user avg key-value size.
|
||||||
|
#
|
||||||
|
# If enabled, kvrocks will auto resize rocksdb.target_file_size_base
|
||||||
|
# and rocksdb.write_buffer_size in-flight with user avg key-value size.
|
||||||
|
# Please see #118.
|
||||||
|
#
|
||||||
|
# Default: yes
|
||||||
|
auto-resize-block-and-sst yes
|
||||||
|
|
||||||
|
################################ MIGRATE #####################################
|
||||||
|
# If the network bandwidth is completely consumed by the migration task,
|
||||||
|
# it will affect the availability of kvrocks. To avoid this situation,
|
||||||
|
# migrate-speed is adopted to limit the migrating speed.
|
||||||
|
# Migrating speed is limited by controlling the duration between sending data,
|
||||||
|
# the duration is calculated by: 1000000 * migrate-pipeline-size / migrate-speed (us).
|
||||||
|
# Value: [0,INT_MAX], 0 means no limit
|
||||||
|
#
|
||||||
|
# Default: 4096
|
||||||
|
migrate-speed 4096
|
||||||
|
|
||||||
|
# In order to reduce data transmission times and improve the efficiency of data migration,
|
||||||
|
# pipeline is adopted to send multiple data at once. Pipeline size can be set by this option.
|
||||||
|
# Value: [1, INT_MAX], it can't be 0
|
||||||
|
#
|
||||||
|
# Default: 16
|
||||||
|
migrate-pipeline-size 16
|
||||||
|
|
||||||
|
# In order to reduce the write forbidden time during migrating a slot, we will migrate the incremental
|
||||||
|
# data several times to reduce the amount of incremental data. Until the quantity of incremental
|
||||||
|
# data is reduced to a certain threshold, slot will be forbidden write. The threshold is set by
|
||||||
|
# this option.
|
||||||
|
# Value: [1, INT_MAX], it can't be 0
|
||||||
|
#
|
||||||
|
# Default: 10000
|
||||||
|
migrate-sequence-gap 10000
|
||||||
|
|
||||||
|
################################ ROCKSDB #####################################
|
||||||
|
|
||||||
|
# Specify the capacity of metadata column family block cache. Larger block cache
|
||||||
|
# may make request faster while more keys would be cached. Max Size is 200*1024.
|
||||||
|
# Default: 2048MB
|
||||||
|
rocksdb.metadata_block_cache_size 2048
|
||||||
|
|
||||||
|
# Specify the capacity of subkey column family block cache. Larger block cache
|
||||||
|
# may make request faster while more keys would be cached. Max Size is 200*1024.
|
||||||
|
# Default: 2048MB
|
||||||
|
rocksdb.subkey_block_cache_size 2048
|
||||||
|
|
||||||
|
# Metadata column family and subkey column family will share a single block cache
|
||||||
|
# if set 'yes'. The capacity of shared block cache is
|
||||||
|
# metadata_block_cache_size + subkey_block_cache_size
|
||||||
|
#
|
||||||
|
# Default: yes
|
||||||
|
rocksdb.share_metadata_and_subkey_block_cache yes
|
||||||
|
|
||||||
|
# A global cache for table-level rows in RocksDB. If almost always point
|
||||||
|
# lookups, enlarging row cache may improve read performance. Otherwise,
|
||||||
|
# if we enlarge this value, we can lessen metadata/subkey block cache size.
|
||||||
|
#
|
||||||
|
# Default: 0 (disabled)
|
||||||
|
rocksdb.row_cache_size 0
|
||||||
|
|
||||||
|
# Number of open files that can be used by the DB. You may need to
|
||||||
|
# increase this if your database has a large working set. Value -1 means
|
||||||
|
# files opened are always kept open. You can estimate number of files based
|
||||||
|
# on target_file_size_base and target_file_size_multiplier for level-based
|
||||||
|
# compaction. For universal-style compaction, you can usually set it to -1.
|
||||||
|
# Default: 4096
|
||||||
|
rocksdb.max_open_files 8096
|
||||||
|
|
||||||
|
# Amount of data to build up in memory (backed by an unsorted log
|
||||||
|
# on disk) before converting to a sorted on-disk file.
|
||||||
|
#
|
||||||
|
# Larger values increase performance, especially during bulk loads.
|
||||||
|
# Up to max_write_buffer_number write buffers may be held in memory
|
||||||
|
# at the same time,
|
||||||
|
# so you may wish to adjust this parameter to control memory usage.
|
||||||
|
# Also, a larger write buffer will result in a longer recovery time
|
||||||
|
# the next time the database is opened.
|
||||||
|
#
|
||||||
|
# Note that write_buffer_size is enforced per column family.
|
||||||
|
# See db_write_buffer_size for sharing memory across column families.
|
||||||
|
|
||||||
|
# default is 64MB
|
||||||
|
rocksdb.write_buffer_size 64
|
||||||
|
|
||||||
|
# Target file size for compaction; target file size for Level N can be calculated
|
||||||
|
# by target_file_size_base * (target_file_size_multiplier ^ (L-1))
|
||||||
|
#
|
||||||
|
# Default: 128MB
|
||||||
|
rocksdb.target_file_size_base 128
|
||||||
|
|
||||||
|
# The maximum number of write buffers that are built up in memory.
|
||||||
|
# The default and the minimum number is 2, so that when 1 write buffer
|
||||||
|
# is being flushed to storage, new writes can continue to the other
|
||||||
|
# write buffer.
|
||||||
|
# If max_write_buffer_number > 3, writing will be slowed down to
|
||||||
|
# options.delayed_write_rate if we are writing to the last write buffer
|
||||||
|
# allowed.
|
||||||
|
rocksdb.max_write_buffer_number 4
|
||||||
|
|
||||||
|
# Maximum number of concurrent background compaction jobs, submitted to
|
||||||
|
# the default LOW priority thread pool.
|
||||||
|
rocksdb.max_background_compactions 4
|
||||||
|
|
||||||
|
# Maximum number of concurrent background memtable flush jobs, submitted by
|
||||||
|
# default to the HIGH priority thread pool. If the HIGH priority thread pool
|
||||||
|
# is configured to have zero threads, flush jobs will share the LOW priority
|
||||||
|
# thread pool with compaction jobs.
|
||||||
|
rocksdb.max_background_flushes 4
|
||||||
|
|
||||||
|
# This value represents the maximum number of threads that will
|
||||||
|
# concurrently perform a compaction job by breaking it into multiple,
|
||||||
|
# smaller ones that are run simultaneously.
|
||||||
|
# Default: 2 (i.e. no subcompactions)
|
||||||
|
rocksdb.max_sub_compactions 2
|
||||||
|
|
||||||
|
# In order to limit the size of WALs, RocksDB uses DBOptions::max_total_wal_size
|
||||||
|
# as the trigger of column family flush. Once WALs exceed this size, RocksDB
|
||||||
|
# will start forcing the flush of column families to allow deletion of some
|
||||||
|
# oldest WALs. This config can be useful when column families are updated at
|
||||||
|
# non-uniform frequencies. If there's no size limit, users may need to keep
|
||||||
|
# really old WALs when the infrequently-updated column families hasn't flushed
|
||||||
|
# for a while.
|
||||||
|
#
|
||||||
|
# In kvrocks, we use multiple column families to store metadata, subkeys, etc.
|
||||||
|
# If users always use string type, but use list, hash and other complex data types
|
||||||
|
# infrequently, there will be a lot of old WALs if we don't set size limit
|
||||||
|
# (0 by default in rocksdb), because rocksdb will dynamically choose the WAL size
|
||||||
|
# limit to be [sum of all write_buffer_size * max_write_buffer_number] * 4 if set to 0.
|
||||||
|
#
|
||||||
|
# Moreover, you should increase this value if you already set rocksdb.write_buffer_size
|
||||||
|
# to a big value, to avoid influencing the effect of rocksdb.write_buffer_size and
|
||||||
|
# rocksdb.max_write_buffer_number.
|
||||||
|
#
|
||||||
|
# default is 512MB
|
||||||
|
rocksdb.max_total_wal_size 512
|
||||||
|
|
||||||
|
# We implement the replication with the rocksdb WAL; it would trigger a full sync when the seq is out of range.
|
||||||
|
# wal_ttl_seconds and wal_size_limit_mb would affect how archived logs will be deleted.
|
||||||
|
# If WAL_ttl_seconds is not 0, then WAL files will be checked every WAL_ttl_seconds / 2 and those that
|
||||||
|
# are older than WAL_ttl_seconds will be deleted.
|
||||||
|
#
|
||||||
|
# Default: 3 Hours
|
||||||
|
rocksdb.wal_ttl_seconds 10800
|
||||||
|
|
||||||
|
# If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
|
||||||
|
# WAL files will be checked every 10 min and if total size is greater
|
||||||
|
# then WAL_size_limit_MB, they will be deleted starting with the
|
||||||
|
# earliest until size_limit is met. All empty files will be deleted
|
||||||
|
# Default: 16GB
|
||||||
|
rocksdb.wal_size_limit_mb 16384
|
||||||
|
|
||||||
|
# Approximate size of user data packed per block. Note that the
|
||||||
|
# block size specified here corresponds to uncompressed data. The
|
||||||
|
# actual size of the unit read from disk may be smaller if
|
||||||
|
# compression is enabled.
|
||||||
|
#
|
||||||
|
# Default: 4KB
|
||||||
|
rocksdb.block_size 16384
|
||||||
|
|
||||||
|
# Indicating if we'd put index/filter blocks to the block cache
|
||||||
|
#
|
||||||
|
# Default: no
|
||||||
|
rocksdb.cache_index_and_filter_blocks yes
|
||||||
|
|
||||||
|
# Specify the compression to use. Only compress level greater
|
||||||
|
# than 2 to improve performance.
|
||||||
|
# Accept value: "no", "snappy"
|
||||||
|
# default snappy
|
||||||
|
rocksdb.compression snappy
|
||||||
|
|
||||||
|
# If non-zero, we perform bigger reads when doing compaction. If you're
|
||||||
|
# running RocksDB on spinning disks, you should set this to at least 2MB.
|
||||||
|
# That way RocksDB's compaction is doing sequential instead of random reads.
|
||||||
|
# When non-zero, we also force new_table_reader_for_compaction_inputs to
|
||||||
|
# true.
|
||||||
|
#
|
||||||
|
# Default: 2 MB
|
||||||
|
rocksdb.compaction_readahead_size 2097152
|
||||||
|
|
||||||
|
# The limited write rate to DB if soft_pending_compaction_bytes_limit or
|
||||||
|
# level0_slowdown_writes_trigger is triggered.
|
||||||
|
|
||||||
|
# If the value is 0, we will infer a value from the `rate_limiter` value
|
||||||
|
# if it is not empty, or 16MB if `rate_limiter` is empty. Note that
|
||||||
|
# if users change the rate in `rate_limiter` after DB is opened,
|
||||||
|
# `delayed_write_rate` won't be adjusted.
|
||||||
|
#
|
||||||
|
rocksdb.delayed_write_rate 0
|
||||||
|
# If enable_pipelined_write is true, separate write thread queue is
|
||||||
|
# maintained for WAL write and memtable write.
|
||||||
|
#
|
||||||
|
# Default: no
|
||||||
|
rocksdb.enable_pipelined_write no
|
||||||
|
|
||||||
|
# Soft limit on number of level-0 files. We start slowing down writes at this
|
||||||
|
# point. A value <0 means that no writing slow down will be triggered by
|
||||||
|
# number of files in level-0.
|
||||||
|
#
|
||||||
|
# Default: 20
|
||||||
|
rocksdb.level0_slowdown_writes_trigger 20
|
||||||
|
|
||||||
|
# Maximum number of level-0 files. We stop writes at this point.
|
||||||
|
#
|
||||||
|
# Default: 40
|
||||||
|
rocksdb.level0_stop_writes_trigger 40
|
||||||
|
|
||||||
|
# Number of files to trigger level-0 compaction.
|
||||||
|
#
|
||||||
|
# Default: 4
|
||||||
|
rocksdb.level0_file_num_compaction_trigger 4
|
||||||
|
|
||||||
|
# if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
|
||||||
|
#
|
||||||
|
# Default: 0
|
||||||
|
rocksdb.stats_dump_period_sec 0
|
||||||
|
|
||||||
|
# if yes, the auto compaction would be disabled, but the manual compaction remain works
|
||||||
|
#
|
||||||
|
# Default: no
|
||||||
|
rocksdb.disable_auto_compactions no
|
||||||
|
|
||||||
|
# BlobDB(key-value separation) is essentially RocksDB for large-value use cases.
|
||||||
|
# Since 6.18.0, The new implementation is integrated into the RocksDB core.
|
||||||
|
# When set, large values (blobs) are written to separate blob files, and only
|
||||||
|
# pointers to them are stored in SST files. This can reduce write amplification
|
||||||
|
# for large-value use cases at the cost of introducing a level of indirection
|
||||||
|
# for reads. Please see: https://github.com/facebook/rocksdb/wiki/BlobDB.
|
||||||
|
#
|
||||||
|
# Note that when enable_blob_files is set to yes, BlobDB-related configuration
|
||||||
|
# items will take effect.
|
||||||
|
#
|
||||||
|
# Default: no
|
||||||
|
rocksdb.enable_blob_files no
|
||||||
|
|
||||||
|
# The size of the smallest value to be stored separately in a blob file. Values
|
||||||
|
# which have an uncompressed size smaller than this threshold are stored alongside
|
||||||
|
# the keys in SST files in the usual fashion.
|
||||||
|
#
|
||||||
|
# Default: 4096 byte, 0 means that all values are stored in blob files
|
||||||
|
rocksdb.min_blob_size 4096
|
||||||
|
|
||||||
|
# The size limit for blob files. When writing blob files, a new file is
|
||||||
|
# opened once this limit is reached.
|
||||||
|
#
|
||||||
|
# Default: 268435456 bytes
|
||||||
|
rocksdb.blob_file_size 268435456
|
||||||
|
|
||||||
|
# Enables garbage collection of blobs. Valid blobs residing in blob files
|
||||||
|
# older than a cutoff get relocated to new files as they are encountered
|
||||||
|
# during compaction, which makes it possible to clean up blob files once
|
||||||
|
# they contain nothing but obsolete/garbage blobs.
|
||||||
|
# See also rocksdb.blob_garbage_collection_age_cutoff below.
|
||||||
|
#
|
||||||
|
# Default: yes
|
||||||
|
rocksdb.enable_blob_garbage_collection yes
|
||||||
|
|
||||||
|
# The percentage cutoff in terms of blob file age for garbage collection.
|
||||||
|
# Blobs in the oldest N blob files will be relocated when encountered during
|
||||||
|
# compaction, where N = (garbage_collection_cutoff/100) * number_of_blob_files.
|
||||||
|
# Note that this value must belong to [0, 100].
|
||||||
|
#
|
||||||
|
# Default: 25
|
||||||
|
rocksdb.blob_garbage_collection_age_cutoff 25
|
||||||
|
|
||||||
|
|
||||||
|
# The purpose of following three options are to dynamically adjust the upper limit of
|
||||||
|
# the data that each layer can store according to the size of the different
|
||||||
|
# layers of the LSM. Enabling this option will bring some improvements in
|
||||||
|
# deletion efficiency and space amplification, but it will lose a certain
|
||||||
|
# amount of read performance.
|
||||||
|
# If you want know more details about Levels' Target Size, you can read RocksDB wiki:
|
||||||
|
# https://github.com/facebook/rocksdb/wiki/Leveled-Compaction#levels-target-size
|
||||||
|
#
|
||||||
|
# Default: no
|
||||||
|
rocksdb.level_compaction_dynamic_level_bytes no
|
||||||
|
|
||||||
|
# The total file size of level-1 sst.
|
||||||
|
#
|
||||||
|
# Default: 268435456 bytes
|
||||||
|
rocksdb.max_bytes_for_level_base 268435456
|
||||||
|
|
||||||
|
# Multiplication factor for the total file size of L(n+1) layers.
|
||||||
|
# This option is a double type number in RocksDB, but kvrocks is
|
||||||
|
# not support double data type number yet, so we use int data
|
||||||
|
# number instead of double currently.
|
||||||
|
#
|
||||||
|
# Default: 10
|
||||||
|
rocksdb.max_bytes_for_level_multiplier 10
|
||||||
|
|
||||||
|
################################ NAMESPACE #####################################
|
||||||
|
# namespace.test change.me
|
|
@ -11,6 +11,11 @@ sudo apt-get update
|
||||||
sudo apt-get install python3-pip virtualenv python3-dev python3-tk libfreetype6-dev \
|
sudo apt-get install python3-pip virtualenv python3-dev python3-tk libfreetype6-dev \
|
||||||
screen g++ python-tk unzip libsnappy-dev cmake -qq
|
screen g++ python-tk unzip libsnappy-dev cmake -qq
|
||||||
|
|
||||||
|
# update virtualenv
|
||||||
|
# python3-dev | python3-nose
|
||||||
|
|
||||||
|
# python3-tk | python3-numpy??????
|
||||||
|
|
||||||
#Needed for downloading jemalloc
|
#Needed for downloading jemalloc
|
||||||
sudo apt-get install wget -qq
|
sudo apt-get install wget -qq
|
||||||
|
|
||||||
|
@ -84,11 +89,21 @@ pushd ardb/
|
||||||
make
|
make
|
||||||
popd
|
popd
|
||||||
|
|
||||||
# KVROCKS #
|
DEFAULT_HOME=$(pwd)
|
||||||
# test ! -d kvrocks/ && git clone https://github.com/bitleak/kvrocks.git
|
|
||||||
# pushd kvrocks/
|
#### KVROCKS ####
|
||||||
# make -j4
|
test ! -d kvrocks/ && git clone https://github.com/apache/incubator-kvrocks.git kvrocks
|
||||||
# popd
|
pushd kvrocks
|
||||||
|
./build.sh build
|
||||||
|
popd
|
||||||
|
|
||||||
|
DEFAULT_KVROCKS_DATA=$DEFAULT_HOME/DATA_KVROCKS
|
||||||
|
mkdir -p $DEFAULT_KVROCKS_DATA
|
||||||
|
|
||||||
|
sed -i "s|dir /tmp/kvrocks|dir ${DEFAULT_KVROCKS_DATA}|1" $DEFAULT_HOME/configs/6383.conf
|
||||||
|
##-- KVROCKS --##
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Config File
|
# Config File
|
||||||
if [ ! -f configs/core.cfg ]; then
|
if [ ! -f configs/core.cfg ]; then
|
||||||
|
|
Loading…
Reference in a new issue