1. set

sadd fans ff

sadd fans bb

sadd fans cc

sadd followers cc

smembers fans

srem fans ff

spop fans                               # remove and return a random member

scard fans                              # return the size of fans

sismember fans cc

sinter fans followers              # return the intersection of two sets

set foo 0                                   # add a new string

sinterstore foo fans                # with a single source key this copies 'fans' into 'foo', overwriting 'foo' with a set

sinterstore foo fans followers         # store the intersection of 'fans' and 'followers' into 'foo', overwriting 'foo' with a set

srandmember foo                     # return a random member without removing it
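
A minimal redis-cli session (fresh database; 'both' is an illustrative destination key) showing the replies these commands produce:

redis> sadd fans ff bb cc
(integer) 3
redis> sadd followers cc
(integer) 1
redis> sinter fans followers
1) "cc"
redis> sinterstore both fans followers
(integer) 1
redis> smembers both
1) "cc"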

2. zset

zadd students 1 90                  # score first, then member: member "90" gets score 1

zadd students 2 85

zadd students 3 87

zadd students 4 93

zrange students 0 -1

zrangebyscore students 3 5

zcount students 3 5

zcard students

zscore students 87                  # return the score of member "87"

zrank students 87                   # return the 0-based rank of member "87" by ascending score

zremrangebyrank students 1 3        # remove members ranked 1 through 3 (inclusive)

zadd teachers 1 85

zunionstore student_teachers 2 students teachers    # the numkeys argument (2) is required
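
Note that ZADD takes the score before the member, so the commands above store members "90", "85", "87" and "93" with scores 1 to 4. A short session (fresh key) makes the distinction visible:

redis> zadd students 1 90 2 85 3 87 4 93
(integer) 4
redis> zrange students 0 -1 withscores
1) "90"
2) "1"
3) "85"
4) "2"
5) "87"
6) "3"
7) "93"
8) "4"
redis> zscore students 87
"3"
redis> zrank students 87
(integer) 2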

 

3. list

rpush foo bar

lpush foo bar1

rpushx foo bar                      # push only if the list foo already exists

llen foo

lrange foo 0 -1

lindex foo 1

lset foo 1 123

lrem foo 1 _                        # LREM key count value: remove the first occurrence of value "_"

lpop foo

rpop foo

linsert foo before bar before_bar
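
A quick walkthrough (fresh key) showing how LPUSH, RPUSH and LINSERT position elements:

redis> rpush foo bar
(integer) 1
redis> lpush foo bar1
(integer) 2
redis> linsert foo before bar before_bar
(integer) 3
redis> lrange foo 0 -1
1) "bar1"
2) "before_bar"
3) "bar"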

 

4. hash

hset table key1 v1

hget table key1

hexists table key1

hdel table key1

hlen table

hsetnx table key1 v2                # set only if key1 does not already exist

hmget table key1                    # HMGET requires at least one field name

hkeys table

hvals table

hgetall table
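
A short session (fresh key) showing why HSETNX is a no-op on an existing field:

redis> hset table key1 v1
(integer) 1
redis> hsetnx table key1 v2
(integer) 0
redis> hget table key1
"v1"
redis> hgetall table
1) "key1"
2) "v1"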

 

5. string

set str val

setex str 10 bar                    # set value with a 10-second TTL

setnx str 12

getset str 56

exists str

del str

type str

setrange str 0 abc

substr str 0 2                      # deprecated alias of GETRANGE

strlen str

randomkey

rename str str1

expire foo 1

ttl foo

dbsize
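
A sample session (fresh key) tying several of these together:

redis> set str val
OK
redis> setnx str 12
(integer) 0
redis> getset str 56
"val"
redis> setrange str 0 abc
(integer) 3
redis> get str
"abc"
redis> expire str 10
(integer) 1
redis> ttl str
(integer) 10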

 

If Redis refuses writes with the following error:

MISCONF Redis is configured to save RDB snapshots, but is currently not able to persist on disk. Commands that may modify the data set are disabled. Please check Redis logs for details about the error.

you can disable the check (at the risk of losing data if RDB saves keep failing):

> config set stop-writes-on-bgsave-error no

eval "return redis.call('set','foo','bar')" 0 
eval "return redis.call('set',KEYS[1],'bar')" 1 foo
eval "return {1,2,{3,'Hello World!'}}" 0
eval "return redis.call('get','foo')" 0
eval "return {1,2,3.3333,'foo',nil,'bar'}" 0           # returns: 1,2,3,foo

 


Here is a sample logstash.conf that migrates two separate tables, each with its own JDBC query:

input {
  jdbc {
    jdbc_connection_string => "jdbc:mysql://localhost:3306/testdb?useSSL=false&serverTimezone=UTC&useUnicode=true&characterEncoding=utf-8&autoReconnect=true&rewriteBatchedStatements=true"
    # The user we wish to execute our statement as
    jdbc_user => "root"
    jdbc_password => "123456"
    # The path to our downloaded jdbc driver
    jdbc_driver_library => "E:\repo\mysql\mysql-connector-java\6.0.6\mysql-connector-java-6.0.6.jar"
    jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
    # our query
    statement => "SELECT * FROM click_tripadvisor where rid > :sql_last_value"
    use_column_value => true
    tracking_column => "rid"
    jdbc_page_size => 1000
    jdbc_paging_enabled => true
    type => "click_tripadvisor"
    last_run_metadata_path => "C:\users\dt295\click_tripadvisor.logstash_jdbc_last_run"
    }

  jdbc {
    jdbc_connection_string => "jdbc:mysql://localhost:3306/testdb?useSSL=false&serverTimezone=UTC&useUnicode=true&characterEncoding=utf-8&autoReconnect=true&rewriteBatchedStatements=true"
    # The user we wish to execute our statement as
    jdbc_user => "root"
    jdbc_password => "123456"
    # The path to our downloaded jdbc driver
    jdbc_driver_library => "E:\repo\mysql\mysql-connector-java\6.0.6\mysql-connector-java-6.0.6.jar"
    jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
    # our query
    statement => "SELECT * FROM tracking_booking where id > :sql_last_value"
    use_column_value => true
    tracking_column => "id"
    jdbc_page_size => 1000
    jdbc_paging_enabled => true
    type => "tracking_booking"
    clean_run => "true"
    last_run_metadata_path => "C:\users\dt295\tracking_booking.logstash_jdbc_last_run"
    }
}
output {
  stdout { codec => json_lines }
  if[type] == "click_tripadvisor" {
    elasticsearch {
    "hosts" => "localhost:9200"
    "index" => "click_tripadvisor"
    "document_type" => "data"
    "document_id" => "%{rid}"
    }
  }

  if[type] == "tracking_booking" {
    elasticsearch {
    "hosts" => "localhost:9200"
    "index" => "tracking_booking"
    "document_type" => "tracking_booking"
    "document_id" => "%{id}"
    }
  }
}
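
To run the pipeline (assuming Logstash is installed and the file above is saved as logstash.conf):

bin/logstash -f logstash.conf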

 

 

 

1. install the shadowsocks server

wget --no-check-certificate https://raw.githubusercontent.com/teddysun/shadowsocks_install/master/shadowsocks.sh

chmod +x shadowsocks.sh
./shadowsocks.sh 2>&1 | tee shadowsocks.log
vim /etc/shadowsocks.json

2. configure the server side

{
    "server":"0.0.0.0",
    "server_port":8388,
    "local_address":"127.0.0.1",
    "local_port":1080,
    "password":"pass",
    "timeout":300,
    "method":"aes-256-cfb",
    "fast_open":false
}

3. expose the server_port to the remote host
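
One way to open the port, assuming iptables is the active firewall (adjust for firewalld/ufw or your cloud security group):

iptables -I INPUT -p tcp --dport 8388 -j ACCEPT
iptables -I INPUT -p udp --dport 8388 -j ACCEPT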

4. related command

# start:
/etc/init.d/shadowsocks start

# stop: 
/etc/init.d/shadowsocks stop

# restart: 
/etc/init.d/shadowsocks restart

# status: 
/etc/init.d/shadowsocks status

 

Refer:

  1. Shadowsocks one-click installation script (Python version)

You can check the MySQL server's time zone with the following commands:

SELECT @@global.time_zone; # check the global time zone 
SELECT @@session.time_zone; # check the current session timezone 
SELECT TIMEDIFF(NOW(), UTC_TIMESTAMP); # check the current time zone compared to UTC
SELECT UNIX_TIMESTAMP(); # check the current system timestamp
SELECT UNIX_TIMESTAMP(NOW());


SET time_zone = 'Europe/Helsinki';
SET time_zone = "+00:00";
SET @@session.time_zone = "+00:00"; 
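
SET only affects the running server or session. To persist the setting across restarts, put it in my.cnf; named zones such as 'Europe/Helsinki' also require the time zone tables to be loaded once:

# load the time zone tables (needed for named zones)
mysql_tzinfo_to_sql /usr/share/zoneinfo | mysql -u root mysql

# my.cnf
[mysqld]
default-time-zone = '+00:00'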


 

Refer:

  1. Should MySQL have its timezone set to UTC?

Version: MySQL 5.7

(Figure: InnoDB Group Replication, MySQL Router and MySQL Shell)

  • create 3 sandbox instances:

after starting MySQL Shell (mysqlsh) in JavaScript mode:

dba.deploySandboxInstance(3310);
dba.deploySandboxInstance(3320);
dba.deploySandboxInstance(3330);

\connect root@localhost:3310

var cluster = dba.createCluster('DevCluster');


cluster.status(); 

cluster.addInstance('root@localhost:3320');
cluster.addInstance('root@localhost:3330');

cluster.status();



sudo mysqlrouter --bootstrap root@localhost:3310

mysqlsh --uri root@localhost:6446 --sql      # 6446 is the router's default read-write port (6447 is read-only)

select @@port;      # through the RW port this should report the primary's port (3310)
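
A hedged follow-up to watch the cluster fail over (dba.killSandboxInstance and dba.getCluster are part of the AdminAPI; run in mysqlsh JS mode):

dba.killSandboxInstance(3310);

\connect root@localhost:3320
var cluster = dba.getCluster('DevCluster');
cluster.status();        // one of 3320/3330 should now be the primary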


 

explain SELECT * FROM `click_tripadvisor` where refid='W@OYAAoQK3oAAv-wD0AAAADF';
explain SELECT * FROM `click_tripadvisor` where provider_hotel_code='HOMRST';

set global slow_query_log=1; # enable the slow query log; slow statements are written to the configured log file

show global variables like '%slow%';

select @@global.long_query_time; # default long query time

set global long_query_time = 0; # log every query (threshold of 0 seconds)

set global slow_query_log=0; # disable the slow query log

find / -name 'ip-10-200-107-82-slow.log'
watch ls -lh /usr/local/mariadb-10.0.36-linux-x86_64/data/ip-10-200-107-82-slow.log


# download pt-query-digest
wget percona.com/get/pt-query-digest
chmod u+x pt-query-digest

# pt-query-digest needs these Perl modules
yum install -y perl-libwww-perl perl-Time-HiRes

# analyze the slow log
./pt-query-digest /usr/local/mariadb-10.0.36-linux-x86_64/data/ip-10-200-107-82-slow.log > slow-query.log.out


# analyze the slow log use the tcpdump
tcpdump -s 65535 -x -nn -q -tttt -i any -c 9999999 port 3306 | gzip -c > /tmp/tcpdump.txt.gz 
gunzip -c /tmp/tcpdump.txt.gz | ./pt-query-digest --type tcpdump > /tmp/digest_output.txt


# dump mongodb data
mongodump --host localhost --port 27017 --db dacms --out mongo_bak/2018-11-16 -u mongodb -p pass --authenticationDatabase admin
# dump mysql data
mysqldump -uroot -p --routines --events --all-databases --force > all-database.sql

# Percona Toolkit 2.1
pt-query-digest --processlist h=localhost --print --no-report --interval=0.01 > slow.log
# Percona Toolkit 2.2
pt-query-digest --processlist h=localhost --interval=0.01 --output=slowlog > slow.log

# filter the slow log by the given conditions
pt-query-digest --since '2015-04-10 00:00:00' --until '2015-04-10 23:59:59' /usr/local/mariadb-10.0.36-linux-x86_64/data/ip-10-200-107-82-slow.log > slowquery.log
pt-query-digest --filter '$event->{db} && $event->{db} =~ /`linkcenter-data`/' /usr/local/mariadb-10.0.36-linux-x86_64/data/ip-10-200-107-82-slow.log > employee-db-slowquery.log
pt-query-digest --filter '$event->{db} && $event->{db} =~ /market/' /usr/local/mariadb-10.0.36-linux-x86_64/data/ip-10-200-107-82-slow.log > market-db-slowquery.log
pt-query-digest --filter '$event->{user} && $event->{user} =~ /root/' /usr/local/mariadb-10.0.36-linux-x86_64/data/ip-10-200-107-82-slow.log > root-user-slowquery.log


# https://www.percona.com/blog/2011/12/29/identifying-the-load-with-the-help-of-pt-query-digest-and-percona-server/
# min_examined_row_limit: minimum number of rows a query must examine before it is written to the slow log
# log_queries_not_using_indexes: log ALL queries that didn't use an index
# log_output: FILE or TABLE; keep it set to FILE
# slow_query_log_use_global_control: controls which of these variables use the global rather than the session value
# log_slow_rate_limit: e.g. log_slow_rate_limit=10 means only log 1/10th of sessions, regardless of long_query_time
# log_slow_rate_type: can be set to 'query', in which case log_slow_rate_limit limits by query, not by session
# slow_query_log_always_write_time: always log queries that take this long, regardless of the rate limiter
# log_slow_slave_statements: replication statements are not logged by default
# log_slow_sp_statements: useful with stored routines
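
A my.cnf sketch combining these options; the rate-limit and always-write-time variables are Percona Server extensions, while the first block works on stock MySQL:

[mysqld]
# stock MySQL
slow_query_log                   = 1
long_query_time                  = 0
log_output                       = FILE
log_queries_not_using_indexes    = 1
min_examined_row_limit           = 1000

# Percona Server extensions
log_slow_rate_limit              = 10
log_slow_rate_type               = query
slow_query_log_always_write_time = 1
log_slow_slave_statements        = 1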




# show the actual data size and index size
select engine, count(*) as TABLES, 
  concat(round(sum(TABLE_ROWS)/1000000,2), 'M') rows, 
  concat(round(sum(data_length)/(1024*1024*1024),2), 'G') DATA,
  concat(round(sum(index_length)/(1024*1024*1024),2), 'G') idx,
  concat(round(sum(data_length+index_length)/(1024*1024*1024),2), 'G') total_size,
  round(sum(index_length)/sum(data_length), 2) idxfrac
from information_schema.TABLES
where table_schema not in ('mysql', 'performance_schema', 'information_schema')
group by ENGINE
ORDER BY SUM(data_length + index_length) DESC LIMIT 10;

show variables like '%innodb_buffer_pool_size%';
show variables like '%packet%';

# watch the actual InnoDB buffer pool read rate (refreshed every second)
mysqladmin -uroot -pderbysoft ext -ri1 | grep Innodb_buffer_pool_reads




# use mysqltuner to tune the mysql configuration
wget http://mysqltuner.pl/ -O mysqltuner.pl
perl mysqltuner.pl --user root --pass derbysoft

# temporarily log every query so you can inspect execution details
set long_query_time=0;
perl mysqldumpslow.pl xxxx-slow.log
tail -fn100 xxx-slow.log