diff --git a/include/MySQL_HostGroups_Manager.h b/include/MySQL_HostGroups_Manager.h index 9bdf7b5e89..e379473dac 100644 --- a/include/MySQL_HostGroups_Manager.h +++ b/include/MySQL_HostGroups_Manager.h @@ -40,19 +40,19 @@ // we have 2 versions of the same tables: with (debug) and without (no debug) checks #ifdef DEBUG -#define MYHGM_MYSQL_SERVERS "CREATE TABLE mysql_servers ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 3306 , gtid_port INT NOT NULL DEFAULT 0 , weight INT CHECK (weight >= 0) NOT NULL DEFAULT 1 , status INT CHECK (status IN (0, 1, 2, 3, 4)) NOT NULL DEFAULT 0 , compression INT CHECK (compression >=0 AND compression <= 102400) NOT NULL DEFAULT 0 , max_connections INT CHECK (max_connections >=0) NOT NULL DEFAULT 1000 , max_replication_lag INT CHECK (max_replication_lag >= 0 AND max_replication_lag <= 126144000) NOT NULL DEFAULT 0 , use_ssl INT CHECK (use_ssl IN(0,1)) NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED CHECK (max_latency_ms>=0) NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , mem_pointer INT NOT NULL DEFAULT 0 , PRIMARY KEY (hostgroup_id, hostname, port) )" -#define MYHGM_MYSQL_SERVERS_INCOMING "CREATE TABLE mysql_servers_incoming ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 3306 , gtid_port INT NOT NULL DEFAULT 0 , weight INT CHECK (weight >= 0) NOT NULL DEFAULT 1 , status INT CHECK (status IN (0, 1, 2, 3, 4)) NOT NULL DEFAULT 0 , compression INT CHECK (compression >=0 AND compression <= 102400) NOT NULL DEFAULT 0 , max_connections INT CHECK (max_connections >=0) NOT NULL DEFAULT 1000 , max_replication_lag INT CHECK (max_replication_lag >= 0 AND max_replication_lag <= 126144000) NOT NULL DEFAULT 0 , use_ssl INT CHECK (use_ssl IN(0,1)) NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED CHECK (max_latency_ms>=0) NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , PRIMARY KEY (hostgroup_id, hostname, port))" +#define MYHGM_MYSQL_SERVERS "CREATE TABLE IF NOT EXISTS mysql_servers ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 3306 , gtid_port INT NOT NULL DEFAULT 0 , weight INT CHECK (weight >= 0) NOT NULL DEFAULT 1 , status INT CHECK (status IN (0, 1, 2, 3, 4)) NOT NULL DEFAULT 0 , compression INT CHECK (compression >=0 AND compression <= 102400) NOT NULL DEFAULT 0 , max_connections INT CHECK (max_connections >=0) NOT NULL DEFAULT 1000 , max_replication_lag INT CHECK (max_replication_lag >= 0 AND max_replication_lag <= 126144000) NOT NULL DEFAULT 0 , use_ssl INT CHECK (use_ssl IN(0,1)) NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED CHECK (max_latency_ms>=0) NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , mem_pointer INT NOT NULL DEFAULT 0 , PRIMARY KEY (hostgroup_id, hostname, port) )" +#define MYHGM_MYSQL_SERVERS_INCOMING "CREATE TABLE IF NOT EXISTS mysql_servers_incoming ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 3306 , gtid_port INT NOT NULL DEFAULT 0 , weight INT CHECK (weight >= 0) NOT NULL DEFAULT 1 , status INT CHECK (status IN (0, 1, 2, 3, 4)) NOT NULL DEFAULT 0 , compression INT CHECK (compression >=0 AND compression <= 102400) NOT NULL DEFAULT 0 , max_connections INT CHECK (max_connections >=0) NOT NULL DEFAULT 1000 , max_replication_lag INT CHECK (max_replication_lag >= 0 AND max_replication_lag <= 126144000) NOT NULL DEFAULT 0 , use_ssl INT CHECK (use_ssl IN(0,1)) NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED CHECK 
(max_latency_ms>=0) NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , PRIMARY KEY (hostgroup_id, hostname, port))" #else -#define MYHGM_MYSQL_SERVERS "CREATE TABLE mysql_servers ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 3306 , gtid_port INT NOT NULL DEFAULT 0 , weight INT NOT NULL DEFAULT 1 , status INT NOT NULL DEFAULT 0 , compression INT NOT NULL DEFAULT 0 , max_connections INT NOT NULL DEFAULT 1000 , max_replication_lag INT NOT NULL DEFAULT 0 , use_ssl INT NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , mem_pointer INT NOT NULL DEFAULT 0 , PRIMARY KEY (hostgroup_id, hostname, port) )" -#define MYHGM_MYSQL_SERVERS_INCOMING "CREATE TABLE mysql_servers_incoming ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 3306 , gtid_port INT NOT NULL DEFAULT 0 , weight INT NOT NULL DEFAULT 1 , status INT NOT NULL DEFAULT 0 , compression INT NOT NULL DEFAULT 0 , max_connections INT NOT NULL DEFAULT 1000 , max_replication_lag INT NOT NULL DEFAULT 0 , use_ssl INT NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , PRIMARY KEY (hostgroup_id, hostname, port))" +#define MYHGM_MYSQL_SERVERS "CREATE TABLE IF NOT EXISTS mysql_servers ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 3306 , gtid_port INT NOT NULL DEFAULT 0 , weight INT NOT NULL DEFAULT 1 , status INT NOT NULL DEFAULT 0 , compression INT NOT NULL DEFAULT 0 , max_connections INT NOT NULL DEFAULT 1000 , max_replication_lag INT NOT NULL DEFAULT 0 , use_ssl INT NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , mem_pointer INT NOT NULL DEFAULT 0 , PRIMARY KEY (hostgroup_id, hostname, port) )" +#define MYHGM_MYSQL_SERVERS_INCOMING "CREATE TABLE IF NOT EXISTS mysql_servers_incoming ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 3306 , gtid_port INT NOT NULL DEFAULT 0 , weight INT NOT NULL DEFAULT 1 , status INT NOT NULL DEFAULT 0 , compression INT NOT NULL DEFAULT 0 , max_connections INT NOT NULL DEFAULT 1000 , max_replication_lag INT NOT NULL DEFAULT 0 , use_ssl INT NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , PRIMARY KEY (hostgroup_id, hostname, port))" #endif /* DEBUG */ -#define MYHGM_MYSQL_REPLICATION_HOSTGROUPS "CREATE TABLE mysql_replication_hostgroups (writer_hostgroup INT CHECK (writer_hostgroup>=0) NOT NULL PRIMARY KEY , reader_hostgroup INT NOT NULL CHECK (reader_hostgroup<>writer_hostgroup AND reader_hostgroup>=0) , check_type VARCHAR CHECK (LOWER(check_type) IN ('read_only','innodb_read_only','super_read_only','read_only|innodb_read_only','read_only&innodb_read_only')) NOT NULL DEFAULT 'read_only' , comment VARCHAR NOT NULL DEFAULT '' , UNIQUE (reader_hostgroup))" +#define MYHGM_MYSQL_REPLICATION_HOSTGROUPS "CREATE TABLE IF NOT EXISTS mysql_replication_hostgroups (writer_hostgroup INT CHECK (writer_hostgroup>=0) NOT NULL PRIMARY KEY , reader_hostgroup INT NOT NULL CHECK (reader_hostgroup<>writer_hostgroup AND reader_hostgroup>=0) , check_type VARCHAR CHECK (LOWER(check_type) IN ('read_only','innodb_read_only','super_read_only','read_only|innodb_read_only','read_only&innodb_read_only')) NOT NULL DEFAULT 'read_only' , comment VARCHAR NOT NULL DEFAULT '' , UNIQUE (reader_hostgroup))" -#define 
MYHGM_MYSQL_GROUP_REPLICATION_HOSTGROUPS "CREATE TABLE mysql_group_replication_hostgroups (writer_hostgroup INT CHECK (writer_hostgroup>=0) NOT NULL PRIMARY KEY , backup_writer_hostgroup INT CHECK (backup_writer_hostgroup>=0 AND backup_writer_hostgroup<>writer_hostgroup) NOT NULL , reader_hostgroup INT NOT NULL CHECK (reader_hostgroup<>writer_hostgroup AND backup_writer_hostgroup<>reader_hostgroup AND reader_hostgroup>0) , offline_hostgroup INT NOT NULL CHECK (offline_hostgroup<>writer_hostgroup AND offline_hostgroup<>reader_hostgroup AND backup_writer_hostgroup<>offline_hostgroup AND offline_hostgroup>=0) , active INT CHECK (active IN (0,1)) NOT NULL DEFAULT 1 , max_writers INT NOT NULL CHECK (max_writers >= 0) DEFAULT 1 , writer_is_also_reader INT CHECK (writer_is_also_reader IN (0,1,2)) NOT NULL DEFAULT 0 , max_transactions_behind INT CHECK (max_transactions_behind>=0) NOT NULL DEFAULT 0 , comment VARCHAR , UNIQUE (reader_hostgroup) , UNIQUE (offline_hostgroup) , UNIQUE (backup_writer_hostgroup))" +#define MYHGM_MYSQL_GROUP_REPLICATION_HOSTGROUPS "CREATE TABLE IF NOT EXISTS mysql_group_replication_hostgroups (writer_hostgroup INT CHECK (writer_hostgroup>=0) NOT NULL PRIMARY KEY , backup_writer_hostgroup INT CHECK (backup_writer_hostgroup>=0 AND backup_writer_hostgroup<>writer_hostgroup) NOT NULL , reader_hostgroup INT NOT NULL CHECK (reader_hostgroup<>writer_hostgroup AND backup_writer_hostgroup<>reader_hostgroup AND reader_hostgroup>0) , offline_hostgroup INT NOT NULL CHECK (offline_hostgroup<>writer_hostgroup AND offline_hostgroup<>reader_hostgroup AND backup_writer_hostgroup<>offline_hostgroup AND offline_hostgroup>=0) , active INT CHECK (active IN (0,1)) NOT NULL DEFAULT 1 , max_writers INT NOT NULL CHECK (max_writers >= 0) DEFAULT 1 , writer_is_also_reader INT CHECK (writer_is_also_reader IN (0,1,2)) NOT NULL DEFAULT 0 , max_transactions_behind INT CHECK (max_transactions_behind>=0) NOT NULL DEFAULT 0 , comment VARCHAR , UNIQUE (reader_hostgroup) , UNIQUE (offline_hostgroup) , UNIQUE (backup_writer_hostgroup))" -#define MYHGM_MYSQL_GALERA_HOSTGROUPS "CREATE TABLE mysql_galera_hostgroups (writer_hostgroup INT CHECK (writer_hostgroup>=0) NOT NULL PRIMARY KEY , backup_writer_hostgroup INT CHECK (backup_writer_hostgroup>=0 AND backup_writer_hostgroup<>writer_hostgroup) NOT NULL , reader_hostgroup INT NOT NULL CHECK (reader_hostgroup<>writer_hostgroup AND backup_writer_hostgroup<>reader_hostgroup AND reader_hostgroup>0) , offline_hostgroup INT NOT NULL CHECK (offline_hostgroup<>writer_hostgroup AND offline_hostgroup<>reader_hostgroup AND backup_writer_hostgroup<>offline_hostgroup AND offline_hostgroup>=0) , active INT CHECK (active IN (0,1)) NOT NULL DEFAULT 1 , max_writers INT NOT NULL CHECK (max_writers >= 0) DEFAULT 1 , writer_is_also_reader INT CHECK (writer_is_also_reader IN (0,1,2)) NOT NULL DEFAULT 0 , max_transactions_behind INT CHECK (max_transactions_behind>=0) NOT NULL DEFAULT 0 , comment VARCHAR , UNIQUE (reader_hostgroup) , UNIQUE (offline_hostgroup) , UNIQUE (backup_writer_hostgroup))" +#define MYHGM_MYSQL_GALERA_HOSTGROUPS "CREATE TABLE IF NOT EXISTS mysql_galera_hostgroups (writer_hostgroup INT CHECK (writer_hostgroup>=0) NOT NULL PRIMARY KEY , backup_writer_hostgroup INT CHECK (backup_writer_hostgroup>=0 AND backup_writer_hostgroup<>writer_hostgroup) NOT NULL , reader_hostgroup INT NOT NULL CHECK (reader_hostgroup<>writer_hostgroup AND backup_writer_hostgroup<>reader_hostgroup AND reader_hostgroup>0) , offline_hostgroup INT NOT NULL CHECK 
(offline_hostgroup<>writer_hostgroup AND offline_hostgroup<>reader_hostgroup AND backup_writer_hostgroup<>offline_hostgroup AND offline_hostgroup>=0) , active INT CHECK (active IN (0,1)) NOT NULL DEFAULT 1 , max_writers INT NOT NULL CHECK (max_writers >= 0) DEFAULT 1 , writer_is_also_reader INT CHECK (writer_is_also_reader IN (0,1,2)) NOT NULL DEFAULT 0 , max_transactions_behind INT CHECK (max_transactions_behind>=0) NOT NULL DEFAULT 0 , comment VARCHAR , UNIQUE (reader_hostgroup) , UNIQUE (offline_hostgroup) , UNIQUE (backup_writer_hostgroup))" -#define MYHGM_MYSQL_AWS_AURORA_HOSTGROUPS "CREATE TABLE mysql_aws_aurora_hostgroups (writer_hostgroup INT CHECK (writer_hostgroup>=0) NOT NULL PRIMARY KEY , reader_hostgroup INT NOT NULL CHECK (reader_hostgroup<>writer_hostgroup AND reader_hostgroup>0) , " \ +#define MYHGM_MYSQL_AWS_AURORA_HOSTGROUPS "CREATE TABLE IF NOT EXISTS mysql_aws_aurora_hostgroups (writer_hostgroup INT CHECK (writer_hostgroup>=0) NOT NULL PRIMARY KEY , reader_hostgroup INT NOT NULL CHECK (reader_hostgroup<>writer_hostgroup AND reader_hostgroup>0) , " \ "active INT CHECK (active IN (0,1)) NOT NULL DEFAULT 1 , aurora_port INT NOT NUlL DEFAULT 3306 , domain_name VARCHAR NOT NULL DEFAULT '' , " \ "max_lag_ms INT NOT NULL CHECK (max_lag_ms>= 10 AND max_lag_ms <= 600000) DEFAULT 600000 , " \ "check_interval_ms INT NOT NULL CHECK (check_interval_ms >= 100 AND check_interval_ms <= 600000) DEFAULT 1000 , " \ @@ -66,10 +66,10 @@ #define MYHGM_GEN_ADMIN_RUNTIME_SERVERS "SELECT hostgroup_id, hostname, port, gtid_port, CASE status WHEN 0 THEN \"ONLINE\" WHEN 1 THEN \"SHUNNED\" WHEN 2 THEN \"OFFLINE_SOFT\" WHEN 3 THEN \"OFFLINE_HARD\" WHEN 4 THEN \"SHUNNED\" END status, weight, compression, max_connections, max_replication_lag, use_ssl, max_latency_ms, comment FROM mysql_servers ORDER BY hostgroup_id, hostname, port" -#define MYHGM_MYSQL_HOSTGROUP_ATTRIBUTES "CREATE TABLE mysql_hostgroup_attributes (hostgroup_id INT NOT NULL PRIMARY KEY , max_num_online_servers INT CHECK (max_num_online_servers>=0 AND max_num_online_servers <= 1000000) NOT NULL DEFAULT 1000000 , autocommit INT CHECK (autocommit IN (-1, 0, 1)) NOT NULL DEFAULT -1 , free_connections_pct INT CHECK (free_connections_pct >= 0 AND free_connections_pct <= 100) NOT NULL DEFAULT 10 , init_connect VARCHAR NOT NULL DEFAULT '' , multiplex INT CHECK (multiplex IN (0, 1)) NOT NULL DEFAULT 1 , connection_warming INT CHECK (connection_warming IN (0, 1)) NOT NULL DEFAULT 0 , throttle_connections_per_sec INT CHECK (throttle_connections_per_sec >= 1 AND throttle_connections_per_sec <= 1000000) NOT NULL DEFAULT 1000000 , ignore_session_variables VARCHAR CHECK (JSON_VALID(ignore_session_variables) OR ignore_session_variables = '') NOT NULL DEFAULT '' , hostgroup_settings VARCHAR CHECK (JSON_VALID(hostgroup_settings) OR hostgroup_settings = '') NOT NULL DEFAULT '' , servers_defaults VARCHAR CHECK (JSON_VALID(servers_defaults) OR servers_defaults = '') NOT NULL DEFAULT '' , comment VARCHAR NOT NULL DEFAULT '')" +#define MYHGM_MYSQL_HOSTGROUP_ATTRIBUTES "CREATE TABLE IF NOT EXISTS mysql_hostgroup_attributes (hostgroup_id INT NOT NULL PRIMARY KEY , max_num_online_servers INT CHECK (max_num_online_servers>=0 AND max_num_online_servers <= 1000000) NOT NULL DEFAULT 1000000 , autocommit INT CHECK (autocommit IN (-1, 0, 1)) NOT NULL DEFAULT -1 , free_connections_pct INT CHECK (free_connections_pct >= 0 AND free_connections_pct <= 100) NOT NULL DEFAULT 10 , init_connect VARCHAR NOT NULL DEFAULT '' , multiplex INT CHECK (multiplex IN (0, 1)) NOT NULL 
DEFAULT 1 , connection_warming INT CHECK (connection_warming IN (0, 1)) NOT NULL DEFAULT 0 , throttle_connections_per_sec INT CHECK (throttle_connections_per_sec >= 1 AND throttle_connections_per_sec <= 1000000) NOT NULL DEFAULT 1000000 , ignore_session_variables VARCHAR CHECK (JSON_VALID(ignore_session_variables) OR ignore_session_variables = '') NOT NULL DEFAULT '' , hostgroup_settings VARCHAR CHECK (JSON_VALID(hostgroup_settings) OR hostgroup_settings = '') NOT NULL DEFAULT '' , servers_defaults VARCHAR CHECK (JSON_VALID(servers_defaults) OR servers_defaults = '') NOT NULL DEFAULT '' , comment VARCHAR NOT NULL DEFAULT '')" -#define MYHGM_MYSQL_SERVERS_SSL_PARAMS "CREATE TABLE mysql_servers_ssl_params (hostname VARCHAR NOT NULL , port INT CHECK (port >= 0 AND port <= 65535) NOT NULL DEFAULT 3306 , username VARCHAR NOT NULL DEFAULT '' , ssl_ca VARCHAR NOT NULL DEFAULT '' , ssl_cert VARCHAR NOT NULL DEFAULT '' , ssl_key VARCHAR NOT NULL DEFAULT '' , ssl_capath VARCHAR NOT NULL DEFAULT '' , ssl_crl VARCHAR NOT NULL DEFAULT '' , ssl_crlpath VARCHAR NOT NULL DEFAULT '' , ssl_cipher VARCHAR NOT NULL DEFAULT '' , tls_version VARCHAR NOT NULL DEFAULT '' , comment VARCHAR NOT NULL DEFAULT '' , PRIMARY KEY (hostname, port, username) )" +#define MYHGM_MYSQL_SERVERS_SSL_PARAMS "CREATE TABLE IF NOT EXISTS mysql_servers_ssl_params (hostname VARCHAR NOT NULL , port INT CHECK (port >= 0 AND port <= 65535) NOT NULL DEFAULT 3306 , username VARCHAR NOT NULL DEFAULT '' , ssl_ca VARCHAR NOT NULL DEFAULT '' , ssl_cert VARCHAR NOT NULL DEFAULT '' , ssl_key VARCHAR NOT NULL DEFAULT '' , ssl_capath VARCHAR NOT NULL DEFAULT '' , ssl_crl VARCHAR NOT NULL DEFAULT '' , ssl_crlpath VARCHAR NOT NULL DEFAULT '' , ssl_cipher VARCHAR NOT NULL DEFAULT '' , tls_version VARCHAR NOT NULL DEFAULT '' , comment VARCHAR NOT NULL DEFAULT '' , PRIMARY KEY (hostname, port, username) )" /* * @brief Generates the 'runtime_mysql_servers' resultset exposed to other ProxySQL cluster members. 
diff --git a/include/PgSQL_HostGroups_Manager.h b/include/PgSQL_HostGroups_Manager.h index 4f66c7ef80..8e23fb8810 100644 --- a/include/PgSQL_HostGroups_Manager.h +++ b/include/PgSQL_HostGroups_Manager.h @@ -40,17 +40,17 @@ // we have 2 versions of the same tables: with (debug) and without (no debug) checks #ifdef DEBUG -#define MYHGM_PgSQL_SERVERS "CREATE TABLE pgsql_servers ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 5432 , weight INT CHECK (weight >= 0) NOT NULL DEFAULT 1 , status INT CHECK (status IN (0, 1, 2, 3, 4)) NOT NULL DEFAULT 0 , compression INT CHECK (compression >=0 AND compression <= 102400) NOT NULL DEFAULT 0 , max_connections INT CHECK (max_connections >=0) NOT NULL DEFAULT 1000 , max_replication_lag INT CHECK (max_replication_lag >= 0 AND max_replication_lag <= 126144000) NOT NULL DEFAULT 0 , use_ssl INT CHECK (use_ssl IN(0,1)) NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED CHECK (max_latency_ms>=0) NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , mem_pointer INT NOT NULL DEFAULT 0 , PRIMARY KEY (hostgroup_id, hostname, port) )" -#define MYHGM_PgSQL_SERVERS_INCOMING "CREATE TABLE pgsql_servers_incoming ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 5432 , weight INT CHECK (weight >= 0) NOT NULL DEFAULT 1 , status INT CHECK (status IN (0, 1, 2, 3, 4)) NOT NULL DEFAULT 0 , compression INT CHECK (compression >=0 AND compression <= 102400) NOT NULL DEFAULT 0 , max_connections INT CHECK (max_connections >=0) NOT NULL DEFAULT 1000 , max_replication_lag INT CHECK (max_replication_lag >= 0 AND max_replication_lag <= 126144000) NOT NULL DEFAULT 0 , use_ssl INT CHECK (use_ssl IN(0,1)) NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED CHECK (max_latency_ms>=0) NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , PRIMARY KEY (hostgroup_id, hostname, port))" +#define MYHGM_PgSQL_SERVERS "CREATE TABLE IF NOT EXISTS pgsql_servers ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 5432 , weight INT CHECK (weight >= 0) NOT NULL DEFAULT 1 , status INT CHECK (status IN (0, 1, 2, 3, 4)) NOT NULL DEFAULT 0 , compression INT CHECK (compression >=0 AND compression <= 102400) NOT NULL DEFAULT 0 , max_connections INT CHECK (max_connections >=0) NOT NULL DEFAULT 1000 , max_replication_lag INT CHECK (max_replication_lag >= 0 AND max_replication_lag <= 126144000) NOT NULL DEFAULT 0 , use_ssl INT CHECK (use_ssl IN(0,1)) NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED CHECK (max_latency_ms>=0) NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , mem_pointer INT NOT NULL DEFAULT 0 , PRIMARY KEY (hostgroup_id, hostname, port) )" +#define MYHGM_PgSQL_SERVERS_INCOMING "CREATE TABLE IF NOT EXISTS pgsql_servers_incoming ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 5432 , weight INT CHECK (weight >= 0) NOT NULL DEFAULT 1 , status INT CHECK (status IN (0, 1, 2, 3, 4)) NOT NULL DEFAULT 0 , compression INT CHECK (compression >=0 AND compression <= 102400) NOT NULL DEFAULT 0 , max_connections INT CHECK (max_connections >=0) NOT NULL DEFAULT 1000 , max_replication_lag INT CHECK (max_replication_lag >= 0 AND max_replication_lag <= 126144000) NOT NULL DEFAULT 0 , use_ssl INT CHECK (use_ssl IN(0,1)) NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED CHECK (max_latency_ms>=0) NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , PRIMARY KEY (hostgroup_id, hostname, port))" #else -#define 
MYHGM_PgSQL_SERVERS "CREATE TABLE pgsql_servers ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 5432 , weight INT NOT NULL DEFAULT 1 , status INT NOT NULL DEFAULT 0 , compression INT NOT NULL DEFAULT 0 , max_connections INT NOT NULL DEFAULT 1000 , max_replication_lag INT NOT NULL DEFAULT 0 , use_ssl INT NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , mem_pointer INT NOT NULL DEFAULT 0 , PRIMARY KEY (hostgroup_id, hostname, port) )" -#define MYHGM_PgSQL_SERVERS_INCOMING "CREATE TABLE pgsql_servers_incoming ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 5432 , weight INT NOT NULL DEFAULT 1 , status INT NOT NULL DEFAULT 0 , compression INT NOT NULL DEFAULT 0 , max_connections INT NOT NULL DEFAULT 1000 , max_replication_lag INT NOT NULL DEFAULT 0 , use_ssl INT NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , PRIMARY KEY (hostgroup_id, hostname, port))" +#define MYHGM_PgSQL_SERVERS "CREATE TABLE IF NOT EXISTS pgsql_servers ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 5432 , weight INT NOT NULL DEFAULT 1 , status INT NOT NULL DEFAULT 0 , compression INT NOT NULL DEFAULT 0 , max_connections INT NOT NULL DEFAULT 1000 , max_replication_lag INT NOT NULL DEFAULT 0 , use_ssl INT NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , mem_pointer INT NOT NULL DEFAULT 0 , PRIMARY KEY (hostgroup_id, hostname, port) )" +#define MYHGM_PgSQL_SERVERS_INCOMING "CREATE TABLE IF NOT EXISTS pgsql_servers_incoming ( hostgroup_id INT NOT NULL DEFAULT 0 , hostname VARCHAR NOT NULL , port INT NOT NULL DEFAULT 5432 , weight INT NOT NULL DEFAULT 1 , status INT NOT NULL DEFAULT 0 , compression INT NOT NULL DEFAULT 0 , max_connections INT NOT NULL DEFAULT 1000 , max_replication_lag INT NOT NULL DEFAULT 0 , use_ssl INT NOT NULL DEFAULT 0 , max_latency_ms INT UNSIGNED NOT NULL DEFAULT 0 , comment VARCHAR NOT NULL DEFAULT '' , PRIMARY KEY (hostgroup_id, hostname, port))" #endif /* DEBUG */ -#define MYHGM_PgSQL_REPLICATION_HOSTGROUPS "CREATE TABLE pgsql_replication_hostgroups (writer_hostgroup INT CHECK (writer_hostgroup>=0) NOT NULL PRIMARY KEY , reader_hostgroup INT NOT NULL CHECK (reader_hostgroup<>writer_hostgroup AND reader_hostgroup>=0) , check_type VARCHAR CHECK (LOWER(check_type) IN ('read_only')) NOT NULL DEFAULT 'read_only' , comment VARCHAR NOT NULL DEFAULT '' , UNIQUE (reader_hostgroup))" +#define MYHGM_PgSQL_REPLICATION_HOSTGROUPS "CREATE TABLE IF NOT EXISTS pgsql_replication_hostgroups (writer_hostgroup INT CHECK (writer_hostgroup>=0) NOT NULL PRIMARY KEY , reader_hostgroup INT NOT NULL CHECK (reader_hostgroup<>writer_hostgroup AND reader_hostgroup>=0) , check_type VARCHAR CHECK (LOWER(check_type) IN ('read_only')) NOT NULL DEFAULT 'read_only' , comment VARCHAR NOT NULL DEFAULT '' , UNIQUE (reader_hostgroup))" #define PGHGM_GEN_ADMIN_RUNTIME_SERVERS "SELECT hostgroup_id, hostname, port, CASE status WHEN 0 THEN \"ONLINE\" WHEN 1 THEN \"SHUNNED\" WHEN 2 THEN \"OFFLINE_SOFT\" WHEN 3 THEN \"OFFLINE_HARD\" WHEN 4 THEN \"SHUNNED\" END status, weight, compression, max_connections, max_replication_lag, use_ssl, max_latency_ms, comment FROM pgsql_servers ORDER BY hostgroup_id, hostname, port" -#define MYHGM_PgSQL_HOSTGROUP_ATTRIBUTES "CREATE TABLE pgsql_hostgroup_attributes (hostgroup_id INT NOT NULL PRIMARY KEY 
, max_num_online_servers INT CHECK (max_num_online_servers>=0 AND max_num_online_servers <= 1000000) NOT NULL DEFAULT 1000000 , autocommit INT CHECK (autocommit IN (-1, 0, 1)) NOT NULL DEFAULT -1 , free_connections_pct INT CHECK (free_connections_pct >= 0 AND free_connections_pct <= 100) NOT NULL DEFAULT 10 , init_connect VARCHAR NOT NULL DEFAULT '' , multiplex INT CHECK (multiplex IN (0, 1)) NOT NULL DEFAULT 1 , connection_warming INT CHECK (connection_warming IN (0, 1)) NOT NULL DEFAULT 0 , throttle_connections_per_sec INT CHECK (throttle_connections_per_sec >= 1 AND throttle_connections_per_sec <= 1000000) NOT NULL DEFAULT 1000000 , ignore_session_variables VARCHAR CHECK (JSON_VALID(ignore_session_variables) OR ignore_session_variables = '') NOT NULL DEFAULT '' , hostgroup_settings VARCHAR CHECK (JSON_VALID(hostgroup_settings) OR hostgroup_settings = '') NOT NULL DEFAULT '' , servers_defaults VARCHAR CHECK (JSON_VALID(servers_defaults) OR servers_defaults = '') NOT NULL DEFAULT '' , comment VARCHAR NOT NULL DEFAULT '')" +#define MYHGM_PgSQL_HOSTGROUP_ATTRIBUTES "CREATE TABLE IF NOT EXISTS pgsql_hostgroup_attributes (hostgroup_id INT NOT NULL PRIMARY KEY , max_num_online_servers INT CHECK (max_num_online_servers>=0 AND max_num_online_servers <= 1000000) NOT NULL DEFAULT 1000000 , autocommit INT CHECK (autocommit IN (-1, 0, 1)) NOT NULL DEFAULT -1 , free_connections_pct INT CHECK (free_connections_pct >= 0 AND free_connections_pct <= 100) NOT NULL DEFAULT 10 , init_connect VARCHAR NOT NULL DEFAULT '' , multiplex INT CHECK (multiplex IN (0, 1)) NOT NULL DEFAULT 1 , connection_warming INT CHECK (connection_warming IN (0, 1)) NOT NULL DEFAULT 0 , throttle_connections_per_sec INT CHECK (throttle_connections_per_sec >= 1 AND throttle_connections_per_sec <= 1000000) NOT NULL DEFAULT 1000000 , ignore_session_variables VARCHAR CHECK (JSON_VALID(ignore_session_variables) OR ignore_session_variables = '') NOT NULL DEFAULT '' , hostgroup_settings VARCHAR CHECK (JSON_VALID(hostgroup_settings) OR hostgroup_settings = '') NOT NULL DEFAULT '' , servers_defaults VARCHAR CHECK (JSON_VALID(servers_defaults) OR servers_defaults = '') NOT NULL DEFAULT '' , comment VARCHAR NOT NULL DEFAULT '')" /* * @brief Generates the 'runtime_pgsql_servers' resultset exposed to other ProxySQL cluster members. diff --git a/include/PgSQL_Thread.h b/include/PgSQL_Thread.h index 35657449a5..163e36f9c6 100644 --- a/include/PgSQL_Thread.h +++ b/include/PgSQL_Thread.h @@ -1050,10 +1050,10 @@ class PgSQL_Threads_Handler bool query_cache_stores_empty_result; bool kill_backend_connection_when_disconnect; bool client_session_track_gtid; - bool enable_client_deprecate_eof; - bool enable_server_deprecate_eof; - bool enable_load_data_local_infile; - bool log_mysql_warnings_enabled; + //bool enable_client_deprecate_eof; + //bool enable_server_deprecate_eof; + //bool enable_load_data_local_infile; + //bool log_mysql_warnings_enabled; int data_packets_history_size; int handle_warnings; char* server_version; diff --git a/include/proxysql_glovars.hpp b/include/proxysql_glovars.hpp index c5b52b2db1..806802c668 100644 --- a/include/proxysql_glovars.hpp +++ b/include/proxysql_glovars.hpp @@ -87,8 +87,50 @@ class ProxySQL_GlobalVariables { unsigned long long start_time; bool gdbg; bool nostart; + /** + * @brief Disable/Enable the MySQL Monitor module. + * @details Meant to be configured as a startup switch. Its value can only be changed via a + * command-line switch or a config file option.
+ */ bool my_monitor; + /** + * @brief Disable/Enable the PostgreSQL Monitor module. + * @details Meant to be configured as a startup switch. Its value can only be changed via a + * command-line switch or a config file option. + */ bool pg_monitor; + /** + * @brief Disable/Enable the MySQL Workers module. This disables ProxySQL's capability to handle + * MySQL traffic and route it to the MySQL backend servers. + * @details Meant to be configured as a startup switch. Its value can only be changed via a + * command-line switch or a config file option. Disabling this module doesn't affect MySQL + * Monitoring. + */ + bool mysql_workers; + /** + * @brief Disable/Enable the PostgreSQL Workers module. This disables ProxySQL's capability to handle + * PostgreSQL traffic and route it to the PostgreSQL backend servers. + * @details Meant to be configured as a startup switch. Its value can only be changed via a + * command-line switch or a config file option. Disabling this module doesn't affect PostgreSQL + * Monitoring. + */ + bool pgsql_workers; + /** + * @brief Disable/Enable the MySQL Admin module. This disables access, via the MySQL protocol, to + * the ProxySQL Administration interface. + * @details Meant to be configured as a startup switch. Its value can only be changed via a + * command-line switch or a config file option. Note that administrative access + * remains possible via the PostgreSQL Admin interface, if enabled. + */ + bool mysql_admin; + /** + * @brief Disable/Enable the PostgreSQL Admin module. This disables access, via the PostgreSQL + * protocol, to the ProxySQL Administration interface. + * @details Meant to be configured as a startup switch. Its value can only be changed via a + * command-line switch or a config file option. Note that administrative access + * remains possible via the MySQL Admin interface, if enabled.
+ */ + bool pgsql_admin; bool version_check; #ifdef SO_REUSEPORT bool reuseport; diff --git a/include/proxysql_structs.h b/include/proxysql_structs.h index ee5c096814..d53cbc882c 100644 --- a/include/proxysql_structs.h +++ b/include/proxysql_structs.h @@ -758,6 +758,14 @@ enum proxysql_session_type { PROXYSQL_SESSION_NONE }; +// Stop state enumeration for PROXYSQL STOP command (issue 5186) +// Used to manage admin query access during module stop/start cycle +enum proxy_stop_state { + STOP_STATE_RUNNING = 0, // Normal operation, all modules running + STOP_STATE_DRAINING = 1, // Admin queries being drained, modules stopping + STOP_STATE_STOPPED = 2 // Modules stopped, only safe queries allowed +}; + #endif /* PROXYSQL_ENUMS */ @@ -959,6 +967,11 @@ struct _global_variables_t { bool nostart; int reload; + // Stop state management for PROXYSQL STOP command + // See issue 5186: Fix query handling after PROXYSQL STOP + volatile int stop_state; + uint64_t active_admin_queries; + unsigned char protocol_version; char *mysql_server_version; uint32_t server_capabilities; diff --git a/lib/Admin_Handler.cpp b/lib/Admin_Handler.cpp index 288ca2a85c..07b54ae584 100644 --- a/lib/Admin_Handler.cpp +++ b/lib/Admin_Handler.cpp @@ -89,6 +89,9 @@ extern "C" void __gcov_dump(); extern "C" void __gcov_reset(); #endif +// Function declarations for issue 5186 +extern void ProxySQL_Main_init_main_modules(); + #ifdef DEBUG //#define BENCHMARK_FASTROUTING_LOAD @@ -482,6 +485,54 @@ bool admin_handler_command_proxysql(char *query_no_space, unsigned int query_no_ ProxySQL_Admin* SPA = (ProxySQL_Admin*)pa; bool rc = false; + // Handle PROXYSQL START after PROXYSQL STOP (issue 5186) + if (glovars.stop_state == STOP_STATE_STOPPED) { + proxy_info("PROXYSQL START: Restarting modules after STOP\n"); + + /* + * CRITICAL: Why proper restart after PROXYSQL STOP is essential + * ================================================================= + * + * PROXYSQL STOP performs a complete shutdown of all core modules: + * 1. MySQL and PgSQL thread pools (GloMTH, GloPTH) are shutdown + * 2. Query Processors (GloMyQPro, GloMyAuth, etc.) are destroyed + * 3. All global module pointers are set to NULL + * 4. Thread synchronization objects are cleaned up + * + * Simply calling ProxySQL_Main_init_main_modules() is INSUFFICIENT because: + * - It doesn't properly reinitialize thread synchronization + * - It doesn't restart the MySQL/PgSQL thread pools + * - It doesn't ensure proper thread initialization sequencing + * - It leaves modules in inconsistent state + * + * Without complete reinitialization, admin queries will crash with: + * - Segmentation faults accessing destroyed Query Processor modules + * - Race conditions with partially initialized thread pools + * - NULL pointer dereferences in GloMyQPro, GloMyAuth, etc. + * - Lock contention on destroyed synchronization objects + * + * SOLUTION: Simulate initial startup conditions: + * 1. Set GloVars.global.nostart = 1 (simulate "not started" state) + * 2. Set admin_nostart_ = true (trigger startup logic) + * 3. Let the normal START sequence reinitialize everything properly + * 4. Ensure thread pools, query processors, and sync objects are rebuilt + * 5. Maintain same initialization order as initial startup + * + * This prevents crashes and ensures full STOP/START functionality. 
+ */ + + // Reset state to running and set nostart_ to trigger normal startup sequence + glovars.stop_state = STOP_STATE_RUNNING; + glovars.reload = 0; + glovars.shutdown = 0; + // Set nostart_ to true so the normal startup logic will trigger + GloVars.global.nostart = 1; + + // Continue to normal startup logic below + admin_nostart_ = true; + } + + // Handle normal START (initial startup or restart after STOP) if (admin_nostart_) { rc = __sync_bool_compare_and_swap(&GloVars.global.nostart, 1, 0); } @@ -536,6 +587,16 @@ bool admin_handler_command_proxysql(char *query_no_space, unsigned int query_no_ } } + // Check if already stopped (issue 5186) + if (glovars.stop_state == STOP_STATE_STOPPED) { + SPA->send_error_msg_to_client(sess, (char*)"ProxySQL modules are already stopped"); + return false; + } + + // Set state to DRAINING - stop accepting new admin queries (issue 5186) + glovars.stop_state = STOP_STATE_DRAINING; + proxy_info("PROXYSQL STOP: Setting state to DRAINING, waiting for %lu admin queries to complete\n", (unsigned long)glovars.active_admin_queries); + char buf[32]; // ----- MySQL module stop ----- @@ -558,22 +619,72 @@ bool admin_handler_command_proxysql(char *query_no_space, unsigned int query_no_ GloPTH->set_variable((char*)"wait_timeout", buf); GloPTH->commit(); - // ----- Common shutdown actions ----- + // ----- Wait for admin queries to complete (issue 5186) ----- + int wait_time_ms = 0; + int max_wait_time_ms = 30000; // 30 seconds timeout + uint64_t last_active_queries = glovars.active_admin_queries; + int stable_count = 0; + + proxy_info("PROXYSQL STOP: Initial admin query count: %lu\n", (unsigned long)glovars.active_admin_queries); + + // Wait for all other admin queries to complete (subtract 1 for current PROXYSQL STOP query) + while (glovars.active_admin_queries > 1 && wait_time_ms < max_wait_time_ms) { + usleep(100000); // 100ms intervals + wait_time_ms += 100; + + if (last_active_queries == glovars.active_admin_queries) { + stable_count++; + } else { + stable_count = 0; + last_active_queries = glovars.active_admin_queries; + proxy_info("PROXYSQL STOP: Admin query count changed to: %lu\n", (unsigned long)glovars.active_admin_queries); + } + + if (wait_time_ms % 1000 == 0) { + proxy_info("PROXYSQL STOP: Waiting for %lu admin queries to complete (%d/%ds), stable for %d cycles\n", + (unsigned long)(glovars.active_admin_queries - 1), wait_time_ms/1000, max_wait_time_ms/1000, stable_count); + } + } + + if (glovars.active_admin_queries > 1) { + proxy_warning("PROXYSQL STOP: %lu admin queries still active after timeout (stable count: %d), proceeding with module stop\n", + (unsigned long)(glovars.active_admin_queries - 1), stable_count); + } else { + proxy_info("PROXYSQL STOP: All admin queries completed, proceeding with module stop\n"); + } + + // ----- Common module stop actions ----- glovars.reload = 2; + glovars.stop_state = STOP_STATE_STOPPED; // Reset Prometheus counters if (GloVars.prometheus_registry) GloVars.prometheus_registry->ResetCounters(); - // Signal shutdown and wait for completion + // Signal module stop and wait for completion + proxy_info("PROXYSQL STOP: Starting thread shutdown sequence\n"); __sync_bool_compare_and_swap(&glovars.shutdown, 0, 1); + + proxy_info("PROXYSQL STOP: Signaling MySQL threads to shutdown\n"); GloMTH->signal_all_threads(0); + proxy_info("PROXYSQL STOP: MySQL threads signaled\n"); + + proxy_info("PROXYSQL STOP: Signaling PgSQL threads to shutdown\n"); GloPTH->signal_all_threads(0); + proxy_info("PROXYSQL STOP: PgSQL threads 
signaled\n"); + proxy_info("PROXYSQL STOP: Entering shutdown wait loop\n"); + int wait_count = 0; while (__sync_fetch_and_add(&glovars.shutdown, 0) == 1) { usleep(1000); + wait_count++; + if (wait_count % 1000 == 0) { // Log every 1 second + proxy_info("PROXYSQL STOP: Still waiting for thread shutdown, count=%d\n", wait_count); + } } + proxy_info("PROXYSQL STOP: Exited shutdown wait loop after %d iterations\n", wait_count); + proxy_info("PROXYSQL STOP: Module stop completed, all modules stopped\n"); SPA->send_ok_msg_to_client(sess, NULL, 0, query_no_space); return false; @@ -2332,6 +2443,10 @@ template void admin_session_handler(S* sess, void *_pa, PtrSize_t *pkt) { ProxySQL_Admin *pa=(ProxySQL_Admin *)_pa; + + // Increment admin query counter for issue 5186 + // This tracks active admin queries during PROXYSQL STOP + __sync_fetch_and_add(&glovars.active_admin_queries, 1); bool needs_vacuum = false; char *error=NULL; int cols; @@ -2597,9 +2712,16 @@ void admin_session_handler(S* sess, void *_pa, PtrSize_t *pkt) { if (!strncasecmp(CLUSTER_QUERY_MYSQL_USERS, query_no_space, strlen(CLUSTER_QUERY_MYSQL_USERS))) { if (sess->session_type == PROXYSQL_SESSION_ADMIN) { - pthread_mutex_lock(&users_mutex); - resultset = GloMyAuth->get_current_mysql_users(); - pthread_mutex_unlock(&users_mutex); + if (glovars.stop_state == STOP_STATE_RUNNING) { + pthread_mutex_lock(&users_mutex); + resultset = GloMyAuth->get_current_mysql_users(); + pthread_mutex_unlock(&users_mutex); + } else { + // Return empty resultset when modules are stopped (issue 5186) + resultset = new SQLite3_result(2); + resultset->add_column_definition(SQLITE_TEXT, "username"); + resultset->add_column_definition(SQLITE_TEXT, "password"); + } if (resultset != nullptr) { sess->SQLite3_to_MySQL(resultset, error, affected_rows, &sess->client_myds->myprot); run_query=false; @@ -2610,28 +2732,37 @@ void admin_session_handler(S* sess, void *_pa, PtrSize_t *pkt) { if (sess->session_type == PROXYSQL_SESSION_ADMIN) { // no stats if (!strncasecmp(CLUSTER_QUERY_MYSQL_QUERY_RULES, query_no_space, strlen(CLUSTER_QUERY_MYSQL_QUERY_RULES))) { - GloMyQPro->wrlock(); - resultset = GloMyQPro->get_current_query_rules_inner(); - if (resultset == NULL) { - GloMyQPro->wrunlock(); // unlock first - resultset = GloMyQPro->get_current_query_rules(); - if (resultset) { + if (glovars.stop_state == STOP_STATE_RUNNING) { + GloMyQPro->wrlock(); + resultset = GloMyQPro->get_current_query_rules_inner(); + if (resultset == NULL) { + GloMyQPro->wrunlock(); // unlock first + resultset = GloMyQPro->get_current_query_rules(); + if (resultset) { + sess->SQLite3_to_MySQL(resultset, error, affected_rows, &sess->client_myds->myprot); + delete resultset; + run_query=false; + } + } else { sess->SQLite3_to_MySQL(resultset, error, affected_rows, &sess->client_myds->myprot); - delete resultset; + GloMyQPro->wrunlock(); run_query=false; - goto __run_query; } } else { + // Return empty resultset when modules are stopped (issue 5186) + resultset = new SQLite3_result(2); + resultset->add_column_definition(SQLITE_TEXT, "rule_id"); + resultset->add_column_definition(SQLITE_TEXT, "active"); sess->SQLite3_to_MySQL(resultset, error, affected_rows, &sess->client_myds->myprot); - //delete resultset; // DO NOT DELETE . 
This is the inner resultset of Query_Processor - GloMyQPro->wrunlock(); + delete resultset; run_query=false; goto __run_query; } } if (!strncasecmp(CLUSTER_QUERY_MYSQL_QUERY_RULES_FAST_ROUTING, query_no_space, strlen(CLUSTER_QUERY_MYSQL_QUERY_RULES_FAST_ROUTING))) { - GloMyQPro->wrlock(); - resultset = GloMyQPro->get_current_query_rules_fast_routing_inner(); + if (glovars.stop_state == STOP_STATE_RUNNING) { + GloMyQPro->wrlock(); + resultset = GloMyQPro->get_current_query_rules_fast_routing_inner(); if (resultset == NULL) { GloMyQPro->wrunlock(); // unlock first resultset = GloMyQPro->get_current_query_rules_fast_routing(); @@ -2648,6 +2779,16 @@ void admin_session_handler(S* sess, void *_pa, PtrSize_t *pkt) { run_query=false; goto __run_query; } + } else { + // Return empty resultset when modules are stopped (issue 5186) + resultset = new SQLite3_result(2); + resultset->add_column_definition(SQLITE_TEXT, "rule_id"); + resultset->add_column_definition(SQLITE_TEXT, "hostname"); + sess->SQLite3_to_MySQL(resultset, error, affected_rows, &sess->client_myds->myprot); + delete resultset; + run_query=false; + goto __run_query; + } } } @@ -2655,7 +2796,10 @@ void admin_session_handler(S* sess, void *_pa, PtrSize_t *pkt) { // SELECT COUNT(*) FROM runtime_mysql_query_rules_fast_routing // we just return the count if (strcmp("SELECT COUNT(*) FROM runtime_mysql_query_rules_fast_routing", query_no_space)==0) { - int cnt = GloMyQPro->get_current_query_rules_fast_routing_count(); + int cnt = 0; + if (glovars.stop_state == STOP_STATE_RUNNING) { + cnt = GloMyQPro->get_current_query_rules_fast_routing_count(); + } l_free(query_length,query); char buf[256]; sprintf(buf,"SELECT %d AS 'COUNT(*)'", cnt); @@ -2861,11 +3005,46 @@ void admin_session_handler(S* sess, void *_pa, PtrSize_t *pkt) { goto __run_query; } } +#if 0 { + // this block seems unnecessary, as we have enough fencing ProxySQL_Admin *SPA=(ProxySQL_Admin *)pa; + // Check if this is a dangerous query that should be blocked during STOP states (issue 5186) + if (glovars.stop_state != STOP_STATE_RUNNING && sess->session_type == PROXYSQL_SESSION_ADMIN) { + // Block dangerous runtime_* queries that access destroyed modules + if (!strncasecmp(query_no_space, "SELECT COUNT(*) FROM runtime_mysql_query_rules", strlen("SELECT COUNT(*) FROM runtime_mysql_query_rules")) || + !strncasecmp(query_no_space, "SELECT COUNT(*) FROM runtime_mysql_query_rules_fast_routing", strlen("SELECT COUNT(*) FROM runtime_mysql_query_rules_fast_routing")) || + !strncasecmp(query_no_space, "SELECT COUNT(*) FROM runtime_mysql_users", strlen("SELECT COUNT(*) FROM runtime_mysql_users")) || + !strncasecmp(query_no_space, "SELECT COUNT(*) FROM stats_mysql_query_digest", strlen("SELECT COUNT(*) FROM stats_mysql_query_digest")) || + !strncasecmp(query_no_space, "SELECT * FROM runtime_mysql_query_rules", strlen("SELECT * FROM runtime_mysql_query_rules")) || + !strncasecmp(query_no_space, "SELECT * FROM runtime_mysql_query_rules_fast_routing", strlen("SELECT * FROM runtime_mysql_query_rules_fast_routing")) || + !strncasecmp(query_no_space, "SELECT * FROM runtime_mysql_users", strlen("SELECT * FROM runtime_mysql_users")) || + !strncasecmp(query_no_space, "SELECT * FROM stats_mysql_query_digest", strlen("SELECT * FROM stats_mysql_query_digest"))) { + + //l_free(query_length, query); // ASAN correctly reports a double free + + // Return empty resultset instead of crashing + SQLite3_result *resultset = new SQLite3_result(1); + resultset->add_column_definition(SQLITE_TEXT, "COUNT(*)"); + + // Add 
a single row with 0 for COUNT(*) queries + SQLite3_row *row = new SQLite3_row(1); + char *field_val = strdup("0"); + row->fields[0] = field_val; + resultset->add_row(row); + + sess->SQLite3_to_MySQL(resultset, error, affected_rows, &sess->client_myds->myprot); + delete resultset; + delete row; + free(field_val); + + run_query = false; + goto __run_query; + } + } needs_vacuum = SPA->GenericRefreshStatistics(query_no_space,query_no_space_length, ( sess->session_type == PROXYSQL_SESSION_ADMIN ? true : false ) ); } - +#endif // 0 if (!strncasecmp("SHOW GLOBAL VARIABLES LIKE 'read_only'", query_no_space, strlen("SHOW GLOBAL VARIABLES LIKE 'read_only'"))) { l_free(query_length,query); @@ -4137,6 +4316,12 @@ void admin_session_handler(S* sess, void *_pa, PtrSize_t *pkt) { pthread_mutex_unlock(&pa->sql_query_global_mutex); } } + +__exit_cleanup: + // Decrement admin query counter for issue 5186 + // This tracks active admin queries during PROXYSQL STOP + __sync_fetch_and_sub(&glovars.active_admin_queries, 1); + l_free(pkt->size-sizeof(mysql_hdr),query_no_space); // it is always freed here l_free(query_length,query); } diff --git a/lib/MySQL_Thread.cpp b/lib/MySQL_Thread.cpp index e299d3dab8..09e8e1cf55 100644 --- a/lib/MySQL_Thread.cpp +++ b/lib/MySQL_Thread.cpp @@ -2073,8 +2073,10 @@ bool MySQL_Threads_Handler::set_variable(char *name, const char *value) { // thi } } if (!strcasecmp(name,"threads")) { - unsigned int intv=atoi(value); - if ((num_threads==0 || num_threads==intv || mysql_threads==NULL) && intv > 0 && intv < 256) { + const uint32_t intv { !GloVars.global.mysql_workers ? uint32_t(0) : atoi(value) }; + const bool valid_val { (intv > 0 && intv < 256) || (!GloVars.global.mysql_workers && intv == 0) }; + + if ((num_threads==0 || num_threads==intv || mysql_threads==NULL) && valid_val) { num_threads=intv; this->status_variables.p_gauge_array[p_th_gauge::mysql_thread_workers]->Set(intv); return true; @@ -2428,7 +2430,7 @@ void MySQL_Threads_Handler::init(unsigned int num, size_t stack) { num_threads=num; this->status_variables.p_gauge_array[p_th_gauge::mysql_thread_workers]->Set(num); } else { - if (num_threads==0) { + if (num_threads==0 && GloVars.global.mysql_workers) { num_threads=DEFAULT_NUM_THREADS; //default this->status_variables.p_gauge_array[p_th_gauge::mysql_thread_workers]->Set(DEFAULT_NUM_THREADS); } @@ -2483,7 +2485,7 @@ proxysql_mysql_thread_t * MySQL_Threads_Handler::create_thread(unsigned int tn, if (GloVars.set_thread_name == true) { char thr_name[16]; snprintf(thr_name, sizeof(thr_name), "MySQLIdle%d", tn); - pthread_setname_np(mysql_threads[tn].thread_id, thr_name); + pthread_setname_np(mysql_threads_idles[tn].thread_id, thr_name); } } #endif // defined(__linux__) || defined(__FreeBSD__) diff --git a/lib/PgSQL_Session.cpp b/lib/PgSQL_Session.cpp index 773358cb02..bffff1146a 100644 --- a/lib/PgSQL_Session.cpp +++ b/lib/PgSQL_Session.cpp @@ -690,97 +690,6 @@ bool PgSQL_Session::handler_special_queries(PtrSize_t* pkt, bool* lock_hostgroup return false; } } - /* - //handle 2564 - if (pkt->size == SELECT_VERSION_COMMENT_LEN + 5 && *((char*)(pkt->ptr) + 4) == (char)0x03 && strncmp((char*)SELECT_VERSION_COMMENT, (char*)pkt->ptr + 5, pkt->size - 5) == 0) { - // FIXME: this doesn't return AUTOCOMMIT or IN_TRANS - PtrSize_t pkt_2; - if (deprecate_eof_active) { - pkt_2.size = PROXYSQL_VERSION_COMMENT_WITH_OK_LEN; - pkt_2.ptr = l_alloc(pkt_2.size); - memcpy(pkt_2.ptr, PROXYSQL_VERSION_COMMENT_WITH_OK, pkt_2.size); - } - else { - pkt_2.size = PROXYSQL_VERSION_COMMENT_LEN; - pkt_2.ptr = 
l_alloc(pkt_2.size); - memcpy(pkt_2.ptr, PROXYSQL_VERSION_COMMENT, pkt_2.size); - } - status = WAITING_CLIENT_DATA; - client_myds->DSS = STATE_SLEEP; - client_myds->PSarrayOUT->add(pkt_2.ptr, pkt_2.size); - if (mirror == false) { - RequestEnd(NULL); - } - l_free(pkt->size, pkt->ptr); - return true; - } - if (pkt->size == strlen((char*)"select USER()") + 5 && strncmp((char*)"select USER()", (char*)pkt->ptr + 5, pkt->size - 5) == 0) { - // FIXME: this doesn't return AUTOCOMMIT or IN_TRANS - char* query1 = (char*)"SELECT \"%s\" AS 'USER()'"; - char* query2 = (char*)malloc(strlen(query1) + strlen(client_myds->myconn->userinfo->username) + 10); - sprintf(query2, query1, client_myds->myconn->userinfo->username); - char* error; - int cols; - int affected_rows; - SQLite3_result* resultset; - GloAdmin->admindb->execute_statement(query2, &error, &cols, &affected_rows, &resultset); - SQLite3_to_MySQL(resultset, error, affected_rows, &client_myds->myprot, false, deprecate_eof_active); - delete resultset; - free(query2); - if (mirror == false) { - RequestEnd(NULL); - } - l_free(pkt->size, pkt->ptr); - return true; - } - // MySQL client check command for dollars quote support, starting at version '8.1.0'. See #4300. - if ((pkt->size == strlen("SELECT $$") + 5) && strncasecmp("SELECT $$", (char*)pkt->ptr + 5, pkt->size - 5) == 0) { - pair err_info{ get_dollar_quote_error(pgsql_thread___server_version) }; - - client_myds->DSS = STATE_QUERY_SENT_NET; - client_myds->myprot.generate_pkt_ERR(true, NULL, NULL, 1, err_info.first, (char*)"HY000", err_info.second, true); - client_myds->DSS = STATE_SLEEP; - status = WAITING_CLIENT_DATA; - - if (mirror == false) { - RequestEnd(NULL); - } - l_free(pkt->size, pkt->ptr); - - return true; - } - - // 'LOAD DATA LOCAL INFILE' is unsupported. We report an specific error to inform clients about this fact. For more context see #833. 
- if ((pkt->size >= 22 + 5) && (strncasecmp((char*)"LOAD DATA LOCAL INFILE", (char*)pkt->ptr + 5, 22) == 0)) { - if (pgsql_thread___enable_load_data_local_infile == false) { - client_myds->DSS = STATE_QUERY_SENT_NET; - client_myds->myprot.generate_error_packet(true, true, "Unsupported 'LOAD DATA LOCAL INFILE' command", - PGSQL_ERROR_CODES::ERRCODE_FEATURE_NOT_SUPPORTED, false, true); - if (mirror == false) { - RequestEnd(NULL, true); - } - else { - client_myds->DSS = STATE_SLEEP; - status = WAITING_CLIENT_DATA; - } - l_free(pkt->size, pkt->ptr); - return true; - } - else { - if (pgsql_thread___verbose_query_error) { - proxy_warning( - "Command '%.*s' refers to file in ProxySQL instance, NOT on client side!\n", - static_cast(pkt->size - sizeof(mysql_hdr) - 1), - static_cast(pkt->ptr) + 5 - ); - } - else { - proxy_warning( - "Command 'LOAD DATA LOCAL INFILE' refers to file in ProxySQL instance, NOT on client side!\n" - ); - } - } - }*/ return false; } diff --git a/lib/PgSQL_Thread.cpp b/lib/PgSQL_Thread.cpp index d1084d557d..f42dc373f6 100644 --- a/lib/PgSQL_Thread.cpp +++ b/lib/PgSQL_Thread.cpp @@ -288,9 +288,9 @@ static char* pgsql_thread_variables_names[] = { (char*)"connect_timeout_client", (char*)"connect_timeout_server", (char*)"connect_timeout_server_max", - (char*)"enable_client_deprecate_eof", - (char*)"enable_server_deprecate_eof", - (char*)"enable_load_data_local_infile", + //(char*)"enable_client_deprecate_eof", + //(char*)"enable_server_deprecate_eof", + //(char*)"enable_load_data_local_infile", (char*)"eventslog_filename", (char*)"eventslog_filesize", (char*)"eventslog_default_log", @@ -307,7 +307,7 @@ static char* pgsql_thread_variables_names[] = { (char*)"have_ssl", (char*)"have_compress", (char*)"interfaces", - (char*)"log_mysql_warnings_enabled", + //(char*)"log_mysql_warnings_enabled", (char*)"monitor_enabled", (char*)"monitor_history", (char*)"monitor_connect_interval", @@ -1083,10 +1083,10 @@ PgSQL_Threads_Handler::PgSQL_Threads_Handler() { #endif /*debug */ variables.query_digests_grouping_limit = 3; variables.query_digests_groups_grouping_limit = 10; // changed in 2.6.0 , was 0 - variables.enable_client_deprecate_eof = true; - variables.enable_server_deprecate_eof = true; - variables.enable_load_data_local_infile = false; - variables.log_mysql_warnings_enabled = false; + //variables.enable_client_deprecate_eof = true; + //variables.enable_server_deprecate_eof = true; + //variables.enable_load_data_local_infile = false; + //variables.log_mysql_warnings_enabled = false; variables.data_packets_history_size = 0; // status variables status_variables.mirror_sessions_current = 0; @@ -1652,7 +1652,7 @@ bool PgSQL_Threads_Handler::set_variable(char* name, const char* value) { // thi if (intv >= 0 && intv <= 20 * 24 * 3600 * 1000) { variables.wait_timeout = intv; if (variables.wait_timeout < 5000) { - proxy_warning("mysql-wait_timeout is set to a low value: %ums\n", variables.wait_timeout); + proxy_warning("pgsql-wait_timeout is set to a low value: %ums\n", variables.wait_timeout); } return true; } @@ -1962,8 +1962,10 @@ bool PgSQL_Threads_Handler::set_variable(char* name, const char* value) { // thi } } if (!strcasecmp(name, "threads")) { - unsigned int intv = atoi(value); - if ((num_threads == 0 || num_threads == intv || pgsql_threads == NULL) && intv > 0 && intv < 256) { + const uint32_t intv { !GloVars.global.pgsql_workers ? 
uint32_t(0) : atoi(value) }; + const bool valid_val { (intv > 0 && intv < 256) || (!GloVars.global.pgsql_workers && intv == 0) }; + + if ((num_threads == 0 || num_threads == intv || pgsql_threads == NULL) && valid_val) { num_threads = intv; //this->status_variables.p_gauge_array[p_th_gauge::mysql_thread_workers]->Set(intv); return true; @@ -1994,13 +1996,6 @@ bool PgSQL_Threads_Handler::set_variable(char* name, const char* value) { // thi } return false; } - if (!strcasecmp(name, "forward_autocommit")) { - if (strcasecmp(value, "true") == 0 || strcasecmp(value, "1") == 0) { - proxy_error("Variable mysql-forward_autocommit is deprecated. See issue #3253\n"); - return false; - } - return false; - } if (!strcasecmp(name, "data_packets_history_size")) { int intv = atoi(value); if (intv >= 0 && intv < INT_MAX) { @@ -2030,13 +2025,13 @@ char** PgSQL_Threads_Handler::get_variables_list() { VariablesPointers_bool["commands_stats"] = make_tuple(&variables.commands_stats, false); VariablesPointers_bool["connection_warming"] = make_tuple(&variables.connection_warming, false); VariablesPointers_bool["default_reconnect"] = make_tuple(&variables.default_reconnect, false); - VariablesPointers_bool["enable_client_deprecate_eof"] = make_tuple(&variables.enable_client_deprecate_eof, false); - VariablesPointers_bool["enable_server_deprecate_eof"] = make_tuple(&variables.enable_server_deprecate_eof, false); - VariablesPointers_bool["enable_load_data_local_infile"] = make_tuple(&variables.enable_load_data_local_infile, false); + //VariablesPointers_bool["enable_client_deprecate_eof"] = make_tuple(&variables.enable_client_deprecate_eof, false); + //VariablesPointers_bool["enable_server_deprecate_eof"] = make_tuple(&variables.enable_server_deprecate_eof, false); + //VariablesPointers_bool["enable_load_data_local_infile"] = make_tuple(&variables.enable_load_data_local_infile, false); VariablesPointers_bool["enforce_autocommit_on_reads"] = make_tuple(&variables.enforce_autocommit_on_reads, false); VariablesPointers_bool["firewall_whitelist_enabled"] = make_tuple(&variables.firewall_whitelist_enabled, false); VariablesPointers_bool["kill_backend_connection_when_disconnect"] = make_tuple(&variables.kill_backend_connection_when_disconnect, false); - VariablesPointers_bool["log_mysql_warnings_enabled"] = make_tuple(&variables.log_mysql_warnings_enabled, false); + //VariablesPointers_bool["log_mysql_warnings_enabled"] = make_tuple(&variables.log_mysql_warnings_enabled, false); VariablesPointers_bool["log_unhealthy_connections"] = make_tuple(&variables.log_unhealthy_connections, false); VariablesPointers_bool["monitor_enabled"] = make_tuple(&variables.monitor_enabled, false); VariablesPointers_bool["monitor_replication_lag_group_by_host"] = make_tuple(&variables.monitor_replication_lag_group_by_host, false); @@ -2277,7 +2272,7 @@ void PgSQL_Threads_Handler::init(unsigned int num, size_t stack) { //this->status_variables.p_gauge_array[p_th_gauge::mysql_thread_workers]->Set(num); } else { - if (num_threads == 0) { + if (num_threads==0 && GloVars.global.pgsql_workers) { num_threads = DEFAULT_NUM_THREADS; //default //this->status_variables.p_gauge_array[p_th_gauge::mysql_thread_workers]->Set(DEFAULT_NUM_THREADS); } @@ -2742,14 +2737,6 @@ PgSQL_Thread::~PgSQL_Thread() { if (pgsql_thread___monitor_password) { free(pgsql_thread___monitor_password); pgsql_thread___monitor_password = NULL; } if (pgsql_thread___monitor_dbname) { free(pgsql_thread___monitor_dbname); pgsql_thread___monitor_dbname = NULL; } - /* - if 
(mysql_thread___monitor_username) { free(mysql_thread___monitor_username); mysql_thread___monitor_username = NULL; } - if (mysql_thread___monitor_password) { free(mysql_thread___monitor_password); mysql_thread___monitor_password = NULL; } - if (mysql_thread___monitor_replication_lag_use_percona_heartbeat) { - free(mysql_thread___monitor_replication_lag_use_percona_heartbeat); - mysql_thread___monitor_replication_lag_use_percona_heartbeat = NULL; - } - */ //if (pgsql_thread___default_schema) { free(pgsql_thread___default_schema); pgsql_thread___default_schema = NULL; } if (pgsql_thread___keep_multiplexing_variables) { free(pgsql_thread___keep_multiplexing_variables); pgsql_thread___keep_multiplexing_variables = NULL; } if (pgsql_thread___firewall_whitelist_errormsg) { free(pgsql_thread___firewall_whitelist_errormsg); pgsql_thread___firewall_whitelist_errormsg = NULL; } @@ -3830,13 +3817,6 @@ void PgSQL_Thread::refresh_variables() { mysql_thread___max_stmts_per_connection = GloPTH->get_variable_int((char*)"max_stmts_per_connection"); - if (mysql_thread___monitor_username) free(mysql_thread___monitor_username); - mysql_thread___monitor_username = GloPTH->get_variable_string((char*)"monitor_username"); - if (mysql_thread___monitor_password) free(mysql_thread___monitor_password); - mysql_thread___monitor_password = GloPTH->get_variable_string((char*)"monitor_password"); - if (mysql_thread___monitor_replication_lag_use_percona_heartbeat) free(mysql_thread___monitor_replication_lag_use_percona_heartbeat); - mysql_thread___monitor_replication_lag_use_percona_heartbeat = GloPTH->get_variable_string((char*)"monitor_replication_lag_use_percona_heartbeat"); - mysql_thread___monitor_wait_timeout = (bool)GloPTH->get_variable_int((char*)"monitor_wait_timeout"); */ pgsql_thread___monitor_writer_is_also_reader = (bool)GloPTH->get_variable_int((char*)"monitor_writer_is_also_reader"); @@ -3861,40 +3841,8 @@ void PgSQL_Thread::refresh_variables() { if (pgsql_thread___monitor_dbname) free(pgsql_thread___monitor_dbname); pgsql_thread___monitor_dbname = GloPTH->get_variable_string((char*)"monitor_dbname"); - /* - mysql_thread___monitor_aws_rds_topology_discovery_interval = GloPTH->get_variable_int((char *)"monitor_aws_rds_topology_discovery_interval"); - mysql_thread___monitor_replication_lag_group_by_host = (bool)GloPTH->get_variable_int((char*)"monitor_replication_lag_group_by_host"); - mysql_thread___monitor_replication_lag_interval = GloPTH->get_variable_int((char*)"monitor_replication_lag_interval"); - mysql_thread___monitor_replication_lag_timeout = GloPTH->get_variable_int((char*)"monitor_replication_lag_timeout"); - mysql_thread___monitor_replication_lag_count = GloPTH->get_variable_int((char*)"monitor_replication_lag_count"); - mysql_thread___monitor_groupreplication_healthcheck_interval = GloPTH->get_variable_int((char*)"monitor_groupreplication_healthcheck_interval"); - mysql_thread___monitor_groupreplication_healthcheck_timeout = GloPTH->get_variable_int((char*)"monitor_groupreplication_healthcheck_timeout"); - mysql_thread___monitor_groupreplication_healthcheck_max_timeout_count = GloPTH->get_variable_int((char*)"monitor_groupreplication_healthcheck_max_timeout_count"); - mysql_thread___monitor_groupreplication_max_transactions_behind_count = GloPTH->get_variable_int((char*)"monitor_groupreplication_max_transactions_behind_count"); - mysql_thread___monitor_groupreplication_max_transaction_behind_for_read_only = 
GloPTH->get_variable_int((char*)"monitor_groupreplication_max_transactions_behind_for_read_only"); - mysql_thread___monitor_galera_healthcheck_interval = GloPTH->get_variable_int((char*)"monitor_galera_healthcheck_interval"); - mysql_thread___monitor_galera_healthcheck_timeout = GloPTH->get_variable_int((char*)"monitor_galera_healthcheck_timeout"); - mysql_thread___monitor_galera_healthcheck_max_timeout_count = GloPTH->get_variable_int((char*)"monitor_galera_healthcheck_max_timeout_count"); - mysql_thread___monitor_query_interval = GloPTH->get_variable_int((char*)"monitor_query_interval"); - mysql_thread___monitor_query_timeout = GloPTH->get_variable_int((char*)"monitor_query_timeout"); - mysql_thread___monitor_slave_lag_when_null = GloPTH->get_variable_int((char*)"monitor_slave_lag_when_null"); - mysql_thread___monitor_threads_min = GloPTH->get_variable_int((char*)"monitor_threads_min"); - mysql_thread___monitor_threads_max = GloPTH->get_variable_int((char*)"monitor_threads_max"); - mysql_thread___monitor_threads_queue_maxsize = GloPTH->get_variable_int((char*)"monitor_threads_queue_maxsize"); - mysql_thread___monitor_local_dns_cache_ttl = GloPTH->get_variable_int((char*)"monitor_local_dns_cache_ttl"); - mysql_thread___monitor_local_dns_cache_refresh_interval = GloPTH->get_variable_int((char*)"monitor_local_dns_cache_refresh_interval"); - mysql_thread___monitor_local_dns_resolver_queue_maxsize = GloPTH->get_variable_int((char*)"monitor_local_dns_resolver_queue_maxsize"); - */ if (pgsql_thread___firewall_whitelist_errormsg) free(pgsql_thread___firewall_whitelist_errormsg); pgsql_thread___firewall_whitelist_errormsg = GloPTH->get_variable_string((char*)"firewall_whitelist_errormsg"); - /* - if (mysql_thread___ldap_user_variable) free(mysql_thread___ldap_user_variable); - mysql_thread___ldap_user_variable = GloPTH->get_variable_string((char*)"ldap_user_variable"); - if (mysql_thread___add_ldap_user_comment) free(mysql_thread___add_ldap_user_comment); - mysql_thread___add_ldap_user_comment = GloPTH->get_variable_string((char*)"add_ldap_user_comment"); - if (mysql_thread___default_session_track_gtids) free(mysql_thread___default_session_track_gtids); - mysql_thread___default_session_track_gtids = GloPTH->get_variable_string((char*)"default_session_track_gtids"); - */ for (int i = 0; i < PGSQL_NAME_LAST_LOW_WM; i++) { if (pgsql_thread___default_variables[i]) { @@ -3951,14 +3899,7 @@ void PgSQL_Thread::refresh_variables() { pgsql_thread___handle_unknown_charset = GloPTH->get_variable_int((char*)"handle_unknown_charset"); - /* - mysql_thread___have_compress = (bool)GloPTH->get_variable_int((char*)"have_compress"); - - mysql_thread___enforce_autocommit_on_reads = (bool)GloPTH->get_variable_int((char*)"enforce_autocommit_on_reads"); - mysql_thread___autocommit_false_not_reusable = (bool)GloPTH->get_variable_int((char*)"autocommit_false_not_reusable"); - mysql_thread___autocommit_false_is_transaction = (bool)GloPTH->get_variable_int((char*)"autocommit_false_is_transaction"); - */ pgsql_thread___commands_stats = (bool)GloPTH->get_variable_int((char*)"commands_stats"); pgsql_thread___query_digests = (bool)GloPTH->get_variable_int((char*)"query_digests"); pgsql_thread___query_digests_lowercase = (bool)GloPTH->get_variable_int((char*)"query_digests_lowercase"); @@ -3972,26 +3913,6 @@ void PgSQL_Thread::refresh_variables() { variables.query_cache_stores_empty_result = (bool)GloPTH->get_variable_int((char*)"query_cache_stores_empty_result"); - /* - variables.min_num_servers_lantency_awareness = 
GloPTH->get_variable_int((char*)"min_num_servers_lantency_awareness"); - variables.aurora_max_lag_ms_only_read_from_replicas = GloPTH->get_variable_int((char*)"aurora_max_lag_ms_only_read_from_replicas"); - variables.stats_time_backend_query = (bool)GloPTH->get_variable_int((char*)"stats_time_backend_query"); - variables.stats_time_query_processor = (bool)GloPTH->get_variable_int((char*)"stats_time_query_processor"); - - mysql_thread___client_session_track_gtid = (bool)GloPTH->get_variable_int((char*)"client_session_track_gtid"); - - mysql_thread___enable_client_deprecate_eof = (bool)GloPTH->get_variable_int((char*)"enable_client_deprecate_eof"); - mysql_thread___enable_server_deprecate_eof = (bool)GloPTH->get_variable_int((char*)"enable_server_deprecate_eof"); - */ - pgsql_thread___enable_load_data_local_infile = (bool)GloPTH->get_variable_int((char*)"enable_load_data_local_infile"); - /*mysql_thread___log_mysql_warnings_enabled = (bool)GloPTH->get_variable_int((char*)"log_mysql_warnings_enabled"); - mysql_thread___client_host_cache_size = GloPTH->get_variable_int((char*)"client_host_cache_size"); - mysql_thread___client_host_error_counts = GloPTH->get_variable_int((char*)"client_host_error_counts"); - mysql_thread___handle_warnings = GloPTH->get_variable_int((char*)"handle_warnings"); -#ifdef DEBUG - mysql_thread___session_debug = (bool)GloPTH->get_variable_int((char*)"session_debug"); -#endif // DEBUG -*/ GloPTH->wrunlock(); pthread_mutex_unlock(&GloVars.global.ext_glopth_mutex); } @@ -5050,8 +4971,6 @@ unsigned int PgSQL_Threads_Handler::get_non_idle_client_connections() { q += __sync_fetch_and_add(&thr->mysql_sessions->len, 0); } } - //this->status_variables.p_gauge_array[p_th_gauge::client_connections_non_idle]->Set(q); - return q; } #endif // IDLE_THREADS @@ -5067,9 +4986,6 @@ unsigned long long PgSQL_Threads_Handler::get_pgsql_backend_buffers_bytes() { q += __sync_fetch_and_add(&thr->status_variables.stvar[st_var_mysql_backend_buffers_bytes], 0); } } - //const auto& cur_val = this->status_variables.p_counter_array[p_th_gauge::mysql_backend_buffers_bytes]->Value(); - //this->status_variables.p_counter_array[p_th_gauge::mysql_backend_buffers_bytes]->Increment(q - cur_val); - return q; } @@ -5094,8 +5010,6 @@ unsigned long long PgSQL_Threads_Handler::get_pgsql_frontend_buffers_bytes() { } } #endif // IDLE_THREADS - //this->status_variables.p_counter_array[p_th_gauge::mysql_frontend_buffers_bytes]->Increment(q); - return q; } @@ -5132,43 +5046,6 @@ void PgSQL_Threads_Handler::p_update_metrics() { get_pgsql_backend_buffers_bytes(); get_pgsql_frontend_buffers_bytes(); get_pgsql_session_internal_bytes(); -/* - for (unsigned int i = 0; i < sizeof(PgSQL_Thread_status_variables_counter_array) / sizeof(mythr_st_vars_t); i++) { - if (PgSQL_Thread_status_variables_counter_array[i].name) { - get_status_variable( - PgSQL_Thread_status_variables_counter_array[i].v_idx, - PgSQL_Thread_status_variables_counter_array[i].m_idx, - PgSQL_Thread_status_variables_counter_array[i].conv - ); - } - } - // Gauge variables - for (unsigned int i = 0; i < sizeof(PgSQL_Thread_status_variables_gauge_array) / sizeof(mythr_g_st_vars_t); i++) { - if (PgSQL_Thread_status_variables_gauge_array[i].name) { - get_status_variable( - PgSQL_Thread_status_variables_gauge_array[i].v_idx, - PgSQL_Thread_status_variables_gauge_array[i].m_idx, - PgSQL_Thread_status_variables_gauge_array[i].conv - ); - } - } -*/ -/* - this->status_variables.p_gauge_array[p_th_gauge::mysql_wait_timeout]->Set(this->variables.wait_timeout); - 
this->status_variables.p_gauge_array[p_th_gauge::mysql_monitor_ping_interval]->Set(this->variables.monitor_ping_interval / 1000.0); - this->status_variables.p_gauge_array[p_th_gauge::mysql_max_connections]->Set(this->variables.max_connections); - this->status_variables.p_gauge_array[p_th_gauge::mysql_monitor_enabled]->Set(this->variables.monitor_enabled); - this->status_variables.p_gauge_array[p_th_gauge::mysql_monitor_ping_timeout]->Set(this->variables.monitor_ping_timeout / 1000.0); - this->status_variables.p_gauge_array[p_th_gauge::mysql_monitor_ping_max_failures]->Set(this->variables.monitor_ping_max_failures); - this->status_variables.p_gauge_array[p_th_gauge::mysql_monitor_aws_rds_topology_discovery_interval]->Set(this->variables.monitor_aws_rds_topology_discovery_interval); - this->status_variables.p_gauge_array[p_th_gauge::mysql_monitor_read_only_interval]->Set(this->variables.monitor_read_only_interval/1000.0); - this->status_variables.p_gauge_array[p_th_gauge::mysql_monitor_read_only_timeout]->Set(this->variables.monitor_read_only_timeout/1000.0); - this->status_variables.p_gauge_array[p_th_gauge::mysql_monitor_writer_is_also_reader]->Set(this->variables.monitor_writer_is_also_reader); - this->status_variables.p_gauge_array[p_th_gauge::mysql_monitor_replication_lag_group_by_host]->Set(this->variables.monitor_replication_lag_group_by_host); - this->status_variables.p_gauge_array[p_th_gauge::mysql_monitor_replication_lag_interval]->Set(this->variables.monitor_replication_lag_interval / 1000.0); - this->status_variables.p_gauge_array[p_th_gauge::mysql_monitor_replication_lag_timeout]->Set(this->variables.monitor_replication_lag_timeout / 1000.0); - this->status_variables.p_gauge_array[p_th_gauge::mysql_monitor_history]->Set(this->variables.monitor_history / 1000.0); -*/ } void PgSQL_Thread::Get_Memory_Stats() { diff --git a/lib/ProxySQL_Admin.cpp b/lib/ProxySQL_Admin.cpp index daebe79ced..fa79df25b6 100644 --- a/lib/ProxySQL_Admin.cpp +++ b/lib/ProxySQL_Admin.cpp @@ -2248,9 +2248,6 @@ void * admin_main_loop(void *arg) { __sync_fetch_and_add(&admin_load_main_,1); while (glovars.shutdown==0 && *shutdown==0) { - //int *client; - //int client_t; - //socklen_t addr_size = sizeof(addr); pthread_t child; size_t stacks; unsigned long long curtime=monotonic_time(); @@ -2283,13 +2280,9 @@ void * admin_main_loop(void *arg) { passarg->addr_size = sizeof(custom_sockaddr); memset(passarg->addr, 0, sizeof(custom_sockaddr)); passarg->client_t = accept(fds[i].fd, (struct sockaddr*)passarg->addr, &passarg->addr_size); -// printf("Connected: %s:%d sock=%d\n", inet_ntoa(addr.sin_addr), ntohs(addr.sin_port), client_t); pthread_attr_getstacksize (&attr, &stacks); -// printf("Default stack size = %d\n", stacks); pthread_mutex_lock (&sock_mutex); - //client=(int *)malloc(sizeof(int)); - //*client= client_t; - //if ( pthread_create(&child, &attr, child_func[callback_func[i]], client) != 0 ) { + if ( pthread_create(&child, &attr, child_func[callback_func[i]], passarg) != 0 ) { // LCOV_EXCL_START perror("pthread_create"); @@ -2315,12 +2308,15 @@ void * admin_main_loop(void *arg) { if (resultset) { SQLite3_result * resultset2 = NULL; - // In debug, run the code to generate metrics so that it can be tested even if the web interface plugin isn't loaded. - #ifdef DEBUG - if (true) { - #else - if (GloVars.web_interface_plugin) { - #endif + // In debug, run the code to generate metrics so that it can be tested even if + // the 'web_interface_plugin' isn't loaded. 
+ if ( + #ifdef DEBUG + true + #else + GloVars.web_interface_plugin + #endif + ) { resultset2 = MyHGM->SQL3_Connection_Pool(false); } GloProxyStats->MyHGM_Handler_sets(resultset, resultset2); @@ -2378,7 +2374,7 @@ void * admin_main_loop(void *arg) { nfds++; unsigned int j; i=0; j=0; - for (j=0; jifaces->len; j++) { + for (j=0; j < S_amll.ifaces_mysql->ifaces->len && GloVars.global.mysql_admin; j++) { char *add=NULL; char *port=NULL; char *sn=(char *)S_amll.ifaces_mysql->ifaces->index(j); bool is_ipv6 = false; char *h = NULL; @@ -2402,7 +2398,7 @@ void * admin_main_loop(void *arg) { #else int s = ( atoi(port) ? listen_on_port(add, atoi(port), 128) : listen_on_unix(add, 128)); #endif - //if (s>0) { fds[nfds].fd=s; fds[nfds].events=POLLIN; fds[nfds].revents=0; callback_func[nfds]=0; socket_names[nfds]=strdup(sn); nfds++; } + if (s > 0) { fds[nfds].fd = s; fds[nfds].events = POLLIN; @@ -2418,7 +2414,7 @@ void * admin_main_loop(void *arg) { } i = 0; j = 0; - for (; j < S_amll.ifaces_pgsql->ifaces->len; j++) { + for (; j < S_amll.ifaces_pgsql->ifaces->len && GloVars.global.pgsql_admin; j++) { char* add = NULL; char* port = NULL; char* sn = (char*)S_amll.ifaces_pgsql->ifaces->index(j); bool is_ipv6 = false; char* h = NULL; @@ -2443,7 +2439,7 @@ void * admin_main_loop(void *arg) { #else int s = (atoi(port) ? listen_on_port(add, atoi(port), 128) : listen_on_unix(add, 128)); #endif - //if (s>0) { fds[nfds].fd=s; fds[nfds].events=POLLIN; fds[nfds].revents=0; callback_func[nfds]=0; socket_names[nfds]=strdup(sn); nfds++; } + if (s > 0) { fds[nfds].fd = s; fds[nfds].events = POLLIN; @@ -2461,7 +2457,7 @@ void * admin_main_loop(void *arg) { } } - //if (__sync_add_and_fetch(shutdown,0)==0) __sync_add_and_fetch(shutdown,1); + for (i=0; iexecute("DELETE FROM runtime_mysql_query_rules_fast_routing"); } else { @@ -4347,6 +4350,13 @@ void ProxySQL_Admin::save_mysql_query_rules_fast_routing_from_runtime(bool _runt } void ProxySQL_Admin::save_pgsql_query_rules_fast_routing_from_runtime(bool _runtime) { + // Check if PgSQL Query Processor is initialized (issue 5186) + // Prevent crashes during PROXYSQL START race conditions + if (GloPgQPro == nullptr) { + proxy_warning("PgSQL Query Processor not initialized, skipping save_pgsql_query_rules_fast_routing_from_runtime\n"); + return; + } + if (_runtime) { admindb->execute("DELETE FROM runtime_pgsql_query_rules_fast_routing"); } @@ -4416,6 +4426,13 @@ void ProxySQL_Admin::save_pgsql_query_rules_fast_routing_from_runtime(bool _runt } void ProxySQL_Admin::save_mysql_query_rules_from_runtime(bool _runtime) { + // Check if Query Processor is initialized (issue 5186) + // Prevent crashes during PROXYSQL START race conditions + if (GloMyQPro == nullptr) { + proxy_warning("MySQL Query Processor not initialized, skipping save_mysql_query_rules_from_runtime\n"); + return; + } + if (_runtime) { admindb->execute("DELETE FROM runtime_mysql_query_rules"); } else { @@ -4501,6 +4518,13 @@ void ProxySQL_Admin::save_mysql_query_rules_from_runtime(bool _runtime) { } void ProxySQL_Admin::save_pgsql_query_rules_from_runtime(bool _runtime) { + // Check if PgSQL Query Processor is initialized (issue 5186) + // Prevent crashes during PROXYSQL START race conditions + if (GloPgQPro == nullptr) { + proxy_warning("PgSQL Query Processor not initialized, skipping save_pgsql_query_rules_from_runtime\n"); + return; + } + if (_runtime) { admindb->execute("DELETE FROM runtime_pgsql_query_rules"); } diff --git a/lib/ProxySQL_GloVars.cpp b/lib/ProxySQL_GloVars.cpp index f0da1a812a..b006aaf418 100644 
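The four guards added to the save_*_query_rules*_from_runtime() functions above all apply the same idea: return early when the module's global singleton has not been created yet, instead of dereferencing it during a PROXYSQL START race. Below is a minimal, self-contained sketch of that pattern; the Query_Processor type, the GloQPro pointer and save_rules_from_runtime() are illustrative stand-ins for this sketch, not the real ProxySQL symbols.

#include <cstdio>

struct Query_Processor { /* stand-in for the real query processor class */ };

// Hypothetical module singleton; in ProxySQL the real pointer is assigned during module init.
static Query_Processor* GloQPro = nullptr;

static void save_rules_from_runtime() {
	// Guard in the spirit of the fix for issue 5186: the caller may reach this
	// point before the module exists, so skip the work instead of crashing.
	if (GloQPro == nullptr) {
		std::printf("Query Processor not initialized, skipping save\n");
		return;
	}
	// ... normal path: copy the runtime rules into the admin database ...
}

int main() {
	save_rules_from_runtime(); // safe even when the module was never started
	return 0;
}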
--- a/lib/ProxySQL_GloVars.cpp +++ b/lib/ProxySQL_GloVars.cpp @@ -197,6 +197,10 @@ ProxySQL_GlobalVariables::ProxySQL_GlobalVariables() : global.gdbg=false; global.nostart=false; global.foreground=false; + global.mysql_workers=true; + global.pgsql_workers=true; + global.mysql_admin=true; + global.pgsql_admin=true; global.my_monitor=true; global.pg_monitor=true; #ifdef IDLE_THREADS @@ -262,6 +266,12 @@ ProxySQL_GlobalVariables::ProxySQL_GlobalVariables() : #endif /* DEBUG */ opt->add((const char *)"",0,0,0,(const char *)"Starts only the admin service",(const char *)"-n",(const char *)"--no-start"); opt->add((const char *)"",0,0,0,(const char *)"Do not start Monitor Module",(const char *)"-M",(const char *)"--no-monitor"); + opt->add((const char *)"",0,1,0,(const char *)"Do not start MySQL Monitor Module",(const char *)"--mysql-monitor"); + opt->add((const char *)"",0,1,0,(const char *)"Do not start PgSQL Monitor Module",(const char *)"--pgsql-monitor"); + opt->add((const char *)"",0,1,0,(const char *)"Do not start MySQL Worker Threads",(const char *)"--mysql-workers"); + opt->add((const char *)"",0,1,0,(const char *)"Do not start PgSQL Worker Threads",(const char *)"--pgsql-workers"); + opt->add((const char *)"",0,1,0,(const char *)"Do not start MySQL Admin Module",(const char *)"--mysql-admin"); + opt->add((const char *)"",0,1,0,(const char *)"Do not start PgSQL Admin Module",(const char *)"--pgsql-admin"); opt->add((const char *)"",0,0,0,(const char *)"Run in foreground",(const char *)"-f",(const char *)"--foreground"); #ifdef SO_REUSEPORT opt->add((const char *)"",0,0,0,(const char *)"Use SO_REUSEPORT",(const char *)"-r",(const char *)"--reuseport"); @@ -490,6 +500,60 @@ void ProxySQL_GlobalVariables::process_opts_post() { global.pg_monitor=false; } + if (opt->isSet("--mysql-monitor")) { + string val {}; + opt->get("--mysql-monitor")->getString(val); + + if (val == "false" || val == "0") { + global.my_monitor = false; + } + } + + if (opt->isSet("--pgsql-monitor")) { + string val {}; + opt->get("--pgsql-monitor")->getString(val); + + if (val == "false" || val == "0") { + global.pg_monitor = false; + } + } + + if (opt->isSet("--mysql-workers")) { + string val {}; + opt->get("--mysql-workers")->getString(val); + + if (val == "false" || val == "0") { + global.mysql_workers = false; + } + } + + if (opt->isSet("--pgsql-workers")) { + string val {}; + opt->get("--pgsql-workers")->getString(val); + + if (val == "false" || val == "0") { + global.pgsql_workers = false; + } + } + + if (opt->isSet("--mysql-admin")) { + string val {}; + opt->get("--mysql-admin")->getString(val); + + if (val == "false" || val == "0") { + global.mysql_admin = false; + } + } + + if (opt->isSet("--pgsql-admin")) { + string val {}; + opt->get("--pgsql-admin")->getString(val); + + if (val == "false" || val == "0") { + global.pgsql_admin = false; + } + } + #ifdef SO_REUSEPORT { struct utsname unameData; diff --git a/src/main.cpp b/src/main.cpp index 52ee9ef4a6..991107a6cf 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -360,6 +360,15 @@ static bool check_openssl_version() { void ProxySQL_Main_init_SSL_module() { + // Check if SSL context is already initialized (issue 5186) + // This prevents SSL context corruption during PROXYSQL STOP/START restart cycles + if (GloVars.global.ssl_ctx != NULL) { + proxy_info("SSL context already initialized at %p, skipping reinitialization\n", GloVars.global.ssl_ctx); + return; + } + + proxy_info("Initializing new SSL context\n"); + int rc = SSL_library_init(); if (rc==0) { 
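	// Aside for clarity (illustration only, not part of the patch): the early-return
	// guard added at the top of this function makes SSL initialization idempotent.
	// Reduced to its essence the pattern is
	//
	//     if (GloVars.global.ssl_ctx != NULL) return;   // keep the existing context
	//
	// so an admin-issued PROXYSQL STOP followed by PROXYSQL START re-enters this
	// init path and returns early, instead of re-creating a context that live
	// sessions may still reference.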
 	proxy_error("%s\n", SSL_alert_desc_string_long(rc));
@@ -377,6 +386,7 @@ void ProxySQL_Main_init_SSL_module() {
 		proxy_error("Unable to initialize SSL. Shutting down...\n");
 		exit(EXIT_SUCCESS); // we exit gracefully to not be restarted
 	}
+	proxy_info("SSL context created successfully at %p\n", GloVars.global.ssl_ctx);
 	if (!SSL_CTX_set_min_proto_version(GloVars.global.ssl_ctx,TLS1_VERSION)) {
 		proxy_error("Unable to initialize SSL. SSL_set_min_proto_version failed. Shutting down...\n");
 		exit(EXIT_SUCCESS); // we exit gracefully to not be restarted
@@ -674,10 +684,21 @@ void* unified_query_cache_purge_thread(void *arg) {
 	return NULL;
 }
-/*void* pgsql_shared_query_cache_funct(void* arg) {
-	GloPgQC->purgeHash_thread(NULL);
-	return NULL;
-}*/
+template <typename T>
+void update_global_variable(const string& name, T& var) {
+	const Setting& root { GloVars.confFile->cfg.getRoot() };
+
+	if (root.exists(name)==true) {
+		T new_val {};
+		bool rc { root.lookupValue(name, new_val) };
+
+		if (rc == true) {
+			var = new_val;
+		} else {
+			proxy_error("The config file is configured with an invalid '%s'\n", name.c_str());
+		}
+	}
+}
 
 void ProxySQL_Main_process_global_variables(int argc, const char **argv) {
 	GloVars.errorlog = NULL;
@@ -733,27 +754,17 @@ void ProxySQL_Main_process_global_variables(int argc, const char **argv) {
 			}
 		}
 	}
+
 	// if cluster_sync_interfaces is true, interfaces variables are synced too
-	if (root.exists("cluster_sync_interfaces")==true) {
-		bool value_bool;
-		bool rc;
-		rc=root.lookupValue("cluster_sync_interfaces", value_bool);
-		if (rc==true) {
-			GloVars.cluster_sync_interfaces=value_bool;
-		} else {
-			proxy_error("The config file is configured with an invalid cluster_sync_interfaces\n");
-		}
-	}
-	if (root.exists("set_thread_name")==true) {
-		bool value_bool;
-		bool rc;
-		rc=root.lookupValue("set_thread_name", value_bool);
-		if (rc==true) {
-			GloVars.set_thread_name=value_bool;
-		} else {
-			proxy_error("The config file is configured with an invalid set_thread_name\n");
-		}
-	}
+	update_global_variable("cluster_sync_interfaces", GloVars.cluster_sync_interfaces);
+	update_global_variable("set_thread_name", GloVars.set_thread_name);
+	update_global_variable("mysql-workers", GloVars.global.mysql_workers);
+	update_global_variable("pgsql-workers", GloVars.global.pgsql_workers);
+	update_global_variable("mysql-admin", GloVars.global.mysql_admin);
+	update_global_variable("pgsql-admin", GloVars.global.pgsql_admin);
+	update_global_variable("mysql-monitor", GloVars.global.my_monitor);
+	update_global_variable("pgsql-monitor", GloVars.global.pg_monitor);
+
 	if (root.exists("pidfile")==true) {
 		string pidfile_path;
 		bool rc;
@@ -888,6 +899,20 @@ void ProxySQL_Main_process_global_variables(int argc, const char **argv) {
 	GloVars.confFile->ReadGlobals();
 	GloVars.process_opts_post();
+
+	// Coherence check on global variables status
+	if (!GloVars.global.mysql_admin && !GloVars.global.pgsql_admin) {
+		proxy_info("All Admin interfaces, MySQL and PostgreSQL, disabled by config\n");
+	}
+	if (!GloVars.global.mysql_workers && !GloVars.global.pgsql_workers) {
+		proxy_info("All worker threads, MySQL and PostgreSQL, disabled by config\n");
+	}
+	if (!GloVars.global.my_monitor && !GloVars.global.pg_monitor) {
+		proxy_info("All Monitoring, MySQL and PostgreSQL, disabled by config\n");
+	}
+	if (!GloVars.global.pgsql_workers && !GloVars.global.pgsql_admin && !GloVars.global.pg_monitor) {
+		proxy_info("PostgreSQL support fully disabled by config\n");
+	}
 }
 
 void ProxySQL_Main_init_main_modules() {
@@ -943,6 +968,41 @@ void
ProxySQL_Main_init_Admin_module(const bootstrap_info_t& bootstrap_info) { GloAdmin = new ProxySQL_Admin(); GloAdmin->init(bootstrap_info); GloAdmin->print_version(); + + // Synchronize monitor enabled variables with global settings + // + // The CLI arguments --mysql-monitor and --pgsql-monitor correctly set the internal + // global variables GloVars.global.my_monitor and GloVars.global.pg_monitor, which + // control whether monitor threads are started. However, these internal variables + // are not automatically synchronized with the admin interface variables + // mysql-monitor_enabled and pgsql-monitor_enabled that users can query via + // SELECT variable_value FROM global_variables WHERE variable_name='mysql-monitor_enabled'. + // + // Without this synchronization, the admin interface would incorrectly show + // mysql-monitor_enabled=true and pgsql-monitor_enabled=true even when the + // monitor modules are disabled via CLI arguments, breaking user expectations + // and automated testing that relies on these admin interface variables. + // + // This code ensures that the admin interface variables accurately reflect the + // actual monitor module state as controlled by CLI arguments. + { + char query[256]; + // Set mysql-monitor_enabled based on global.my_monitor + snprintf(query, sizeof(query), + "INSERT OR REPLACE INTO global_variables VALUES('mysql-monitor_enabled','%s')", + GloVars.global.my_monitor ? "true" : "false"); + GloAdmin->admindb->execute(query); + + // Set pgsql-monitor_enabled based on global.pg_monitor + snprintf(query, sizeof(query), + "INSERT OR REPLACE INTO global_variables VALUES('pgsql-monitor_enabled','%s')", + GloVars.global.pg_monitor ? "true" : "false"); + GloAdmin->admindb->execute(query); + + proxy_info("Monitor variables synchronized: mysql-monitor_enabled=%s, pgsql-monitor_enabled=%s\n", + GloVars.global.my_monitor ? "true" : "false", + GloVars.global.pg_monitor ? 
"true" : "false"); + } if (binary_sha1) { proxy_info("ProxySQL SHA1 checksum: %s\n", binary_sha1); } @@ -988,7 +1048,7 @@ void ProxySQL_Main_init_MySQL_Threads_Handler_module() { proxy_warning("proxysql instance running without --idle-threads : enabling it can potentially improve performance\n"); } #endif // IDLE_THREADS - for (i=0; inum_threads; i++) { + for (i=0; i < GloMTH->num_threads && GloVars.global.mysql_workers; i++) { GloMTH->create_thread(i,mysql_worker_thread_func, false); #ifdef IDLE_THREADS if (GloVars.global.idle_threads) { @@ -1012,7 +1072,7 @@ void ProxySQL_Main_init_PgSQL_Threads_Handler_module() { proxy_warning("proxysql instance running without --idle-threads : enabling it can potentially improve performance\n"); } #endif // IDLE_THREADS - for (i = 0; i < GloPTH->num_threads; i++) { + for (i = 0; i < GloPTH->num_threads && GloVars.global.pgsql_workers; i++) { GloPTH->create_thread(i, pgsql_worker_thread_func, false); #ifdef IDLE_THREADS if (GloVars.global.idle_threads) { @@ -1322,6 +1382,11 @@ void ProxySQL_Main_init() { #else glovars.has_debug=false; #endif /* DEBUG */ + + // Initialize stop state management for issue 5186 + glovars.stop_state = STOP_STATE_RUNNING; + glovars.active_admin_queries = 0; + // __thr_sfp=l_mem_init(); proxysql_init_debug_prometheus_metrics(); } diff --git a/test/tap/groups/groups.json b/test/tap/groups/groups.json index f5223ba2ba..c62c6d215d 100644 --- a/test/tap/groups/groups.json +++ b/test/tap/groups/groups.json @@ -7,6 +7,9 @@ "admin_various_commands3-t" : [ "default-g1","mysql-auto_increment_delay_multiplex=0-g1","mysql-multiplexing=false-g1","mysql-query_digests=0-g1","mysql-query_digests_keep_comment=1-g1","mysql84-g1","mysql84-gr-g1","mysql90-g1","mysql90-gr-g1","mysql91-g1","mysql91-gr-g1","mysql92-g1","mysql92-gr-g1","mysql93-g1","mysql93-gr-g1" ], "admin_various_commands-t" : [ "default-g1","mysql-auto_increment_delay_multiplex=0-g1","mysql-multiplexing=false-g1","mysql-query_digests=0-g1","mysql-query_digests_keep_comment=1-g1","mysql84-g1","mysql84-gr-g1","mysql90-g1","mysql90-gr-g1","mysql91-g1","mysql91-gr-g1","mysql92-g1","mysql92-gr-g1","mysql93-g1","mysql93-gr-g1" ], "basic-t" : [ "default-g1","mysql-auto_increment_delay_multiplex=0-g1","mysql-multiplexing=false-g1","mysql-query_digests=0-g1","mysql-query_digests_keep_comment=1-g1","mysql84-g1","mysql84-gr-g1","mysql90-g1","mysql90-gr-g1","mysql91-g1","mysql91-gr-g1","mysql92-g1","mysql92-gr-g1","mysql93-g1","mysql93-gr-g1" ], +"test_proxysql_stop_query_handling-t" : [ "default-g1" ], + "reg_test_4960_modules_startup-t" : [ "default-g1" ], + "reg_test_4960_monitor_modules-t" : [ "default-g1" ], "charset_unsigned_int-t" : [ "default-g1","mysql-auto_increment_delay_multiplex=0-g1","mysql-multiplexing=false-g1","mysql-query_digests=0-g1","mysql-query_digests_keep_comment=1-g1" ], "clickhouse_php_conn-t" : [ "default-g1","mysql-auto_increment_delay_multiplex=0-g1","mysql-multiplexing=false-g1","mysql-query_digests=0-g1","mysql-query_digests_keep_comment=1-g1" ], "envvars-t" : [ "default-g1","mysql-auto_increment_delay_multiplex=0-g1","mysql-multiplexing=false-g1","mysql-query_digests=0-g1","mysql-query_digests_keep_comment=1-g1","mysql84-g1","mysql84-gr-g1","mysql90-g1","mysql90-gr-g1","mysql91-g1","mysql91-gr-g1","mysql92-g1","mysql92-gr-g1","mysql93-g1","mysql93-gr-g1" ], diff --git a/test/tap/tests/reg_test_4960_modules_startup-t.cpp b/test/tap/tests/reg_test_4960_modules_startup-t.cpp new file mode 100644 index 0000000000..d2a7342d61 --- /dev/null +++ 
b/test/tap/tests/reg_test_4960_modules_startup-t.cpp @@ -0,0 +1,759 @@ +/** + * @file reg_test_4960_modules_startup-t.cpp + * @brief TAP test for verifying module enable/disable functionality introduced in PR #4960. + * + * This test verifies that ProxySQL can start correctly with various combinations of + * MySQL/PostgreSQL worker, admin, and monitor modules enabled or disabled via both + * command line arguments and configuration file settings. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mysql.h" +#include "mysqld_error.h" + +#include "proxysql_utils.h" +#include "tap.h" +#include "command_line.h" +#include "utils.h" +#include "test_proxysql_stop_query_handling.hpp" + +using std::string; +using std::vector; + +// Global PID for ProxySQL process - safe since only one test runs at a time +static int g_proxysql_pid = -1; + +struct TestCase { + string name; + vector cli_args; + string config_content; + int mysql_admin_port; + int pgsql_admin_port; + int mysql_worker_port; + int pgsql_worker_port; + bool should_start; + bool mysql_admin_expected; + bool pgsql_admin_expected; + bool mysql_worker_expected; + bool pgsql_worker_expected; +}; + +int launch_proxysql_instance(const TestCase& test_case, const CommandLine& cl) { + const string test_datadir = string { cl.workdir } + "reg_test_4960_node_" + test_case.name; + const string test_config_file = test_datadir + "/proxysql.cfg"; + const string test_log_file = test_datadir + "/proxysql.log"; + + diag(" Creating test environment:"); + diag(" Datadir: %s", test_datadir.c_str()); + diag(" Config file: %s", test_config_file.c_str()); + + // Clean up existing datadir if it exists + string cleanup_cmd = "rm -rf " + test_datadir; + int cleanup_result = system(cleanup_cmd.c_str()); + (void)cleanup_result; // Suppress unused warning + + // Create test datadir + string mkdir_cmd = "mkdir -p " + test_datadir; + int mkdir_result = system(mkdir_cmd.c_str()); + (void)mkdir_result; // Suppress unused warning + + // Create config file + std::ofstream config_file(test_config_file); + config_file << test_case.config_content; + config_file.close(); + + diag(" Config file contents:"); + // Show config file contents (with proper indentation) + std::istringstream config_stream(test_case.config_content); + string config_line; + while (std::getline(config_stream, config_line)) { + diag(" %s", config_line.c_str()); + } + + // Launch ProxySQL using thread (now safe with global PID) + const std::string config_file_copy = test_config_file; + const std::string log_file_copy = test_log_file; + + std::thread launch_proxy([&cl, &test_case, config_file_copy, log_file_copy] () -> void { + to_opts_t wexecvp_opts {}; + wexecvp_opts.poll_to_us = 100 * 1000; + wexecvp_opts.waitpid_delay_us = 500 * 1000; + wexecvp_opts.timeout_us = 20000 * 1000; // 20s timeout + wexecvp_opts.sigkill_to_us = 3000 * 1000; + + const string proxysql_path { string { getenv("WORKSPACE") } + "/src/proxysql" }; + vector proxy_args = { "-f", "-c", config_file_copy.c_str() }; + + // Add test-specific CLI arguments + for (const auto& arg : test_case.cli_args) { + proxy_args.push_back(arg); + } + + // Build and display the full command for manual testing + string full_command = proxysql_path; + for (const auto& arg : proxy_args) { + full_command += " " + string(arg); + } + diag(" Command to execute manually:"); + diag(" %s", full_command.c_str()); + + string s_stdout {}; + string s_stderr {}; + + diag(" Starting ProxySQL in 
background..."); + g_proxysql_pid = wexecvp(proxysql_path, proxy_args, wexecvp_opts, s_stdout, s_stderr); + + if (g_proxysql_pid <= 0) { + diag("Failed to start ProxySQL for test case: %s", test_case.name.c_str()); + if (!s_stderr.empty()) { + diag("stderr: %s", s_stderr.c_str()); + } + return; + } + + diag(" ProxySQL started with PID: %d", g_proxysql_pid); + + // Write process output to log file + try { + std::ofstream os_logfile { log_file_copy, std::ios::out }; + os_logfile << s_stderr; + } catch (const std::exception& ex) { + fprintf(stderr, "File %s, line %d, Error: %s\n", __FILE__, __LINE__, ex.what()); + } + }); + + launch_proxy.detach(); + + // Give ProxySQL time to start up + diag(" Waiting for ProxySQL to start (3 seconds)..."); + sleep(3); + diag(" ProxySQL startup wait completed"); + + return EXIT_SUCCESS; +} + +bool check_port_listening(int port, int timeout = 2) { + for (int i = 0; i < timeout; i++) { + string cmd = "nc -z 127.0.0.1 " + std::to_string(port) + " 2>/dev/null"; + int result = system(cmd.c_str()); + diag("DEBUG: nc -z 127.0.0.1 %d returned %d", port, result); + if (result == 0) { + diag("DEBUG: Port %d is listening", port); + return true; + } else if (result != 0 && i == 0) { + // Check if nc command exists (only on first attempt to avoid spam) + string check_cmd = "which nc >/dev/null 2>&1"; + if (system(check_cmd.c_str()) != 0) { + diag("ERROR: 'nc' (netcat) command not found. Please install netcat to run this test."); + diag("On Ubuntu/Debian: sudo apt-get install netcat-openbsd"); + diag("On CentOS/RHEL: sudo yum install nc"); + diag("On Fedora: sudo dnf install nmap-ncat"); + exit(EXIT_FAILURE); + } + diag("DEBUG: Port %d is NOT listening (result=%d)", port, result); + } + } + diag("DEBUG: Port %d timeout completed, returning false", port); + return false; +} + +int run_test_case(const TestCase& test_case, const CommandLine& cl) { + int proxy_pid = -1; + int result = EXIT_SUCCESS; + + diag("Running test case: %s", test_case.name.c_str()); + diag(" Expected MySQL admin: %s (port %d)", + test_case.mysql_admin_expected ? "YES" : "NO", test_case.mysql_admin_port); + diag(" Expected PgSQL admin: %s (port %d)", + test_case.pgsql_admin_expected ? "YES" : "NO", test_case.pgsql_admin_port); + diag(" Expected MySQL worker: %s (port %d)", + test_case.mysql_worker_expected ? "YES" : "NO", test_case.mysql_worker_port); + diag(" Expected PgSQL worker: %s (port %d)", + test_case.pgsql_worker_expected ? 
"YES" : "NO", test_case.pgsql_worker_port); + + // Display CLI arguments if any + if (!test_case.cli_args.empty()) { + diag(" CLI arguments:"); + for (size_t i = 0; i < test_case.cli_args.size(); i++) { + diag(" %s", test_case.cli_args[i]); + } + } + + // CLEANUP FIRST: Kill any existing ProxySQL processes that might be listening on our ports + diag(" Pre-test cleanup: killing any existing ProxySQL processes on test ports..."); + string cleanup_mysql_admin = "fuser -k " + std::to_string(test_case.mysql_admin_port) + "/tcp 2>/dev/null || true"; + string cleanup_pgsql_admin = "fuser -k " + std::to_string(test_case.pgsql_admin_port) + "/tcp 2>/dev/null || true"; + string cleanup_mysql_worker = "fuser -k " + std::to_string(test_case.mysql_worker_port) + "/tcp 2>/dev/null || true"; + string cleanup_pgsql_worker = "fuser -k " + std::to_string(test_case.pgsql_worker_port) + "/tcp 2>/dev/null || true"; + + int result1 = system(cleanup_mysql_admin.c_str()); + int result2 = system(cleanup_pgsql_admin.c_str()); + int result3 = system(cleanup_mysql_worker.c_str()); + int result4 = system(cleanup_pgsql_worker.c_str()); + (void)result1; (void)result2; (void)result3; (void)result4; // Suppress unused warnings + + // Also kill any remaining ProxySQL processes from previous test cases + string kill_all_cmd = "pkill -f \"proxysql.*reg_test_4960_node_\" 2>/dev/null || true"; + int kill_result = system(kill_all_cmd.c_str()); + (void)kill_result; // Suppress unused warning + sleep(2); // Give time for cleanup to complete + + diag(" Pre-test cleanup completed"); + + // Reset global PID before launch + g_proxysql_pid = -1; + + // Launch ProxySQL instance + if (launch_proxysql_instance(test_case, cl) != EXIT_SUCCESS) { + diag("Failed to launch ProxySQL for test case: %s", test_case.name.c_str()); + return EXIT_FAILURE; + } + + diag(" Checking admin and worker interfaces..."); + + // Check if admin interfaces are listening as expected + if (test_case.mysql_admin_expected) { + diag(" Checking MySQL admin interface on port %d (should be listening)...", test_case.mysql_admin_port); + if (!check_port_listening(test_case.mysql_admin_port)) { + diag(" ❌ MySQL admin interface NOT listening on port %d for test: %s", + test_case.mysql_admin_port, test_case.name.c_str()); + result = EXIT_FAILURE; + } else { + diag(" ✅ MySQL admin interface IS listening on port %d", test_case.mysql_admin_port); + } + } else { + diag(" Checking MySQL admin interface on port %d (should NOT be listening)...", test_case.mysql_admin_port); + if (check_port_listening(test_case.mysql_admin_port, 1)) { + diag(" ❌ MySQL admin interface unexpectedly listening on port %d for test: %s", + test_case.mysql_admin_port, test_case.name.c_str()); + result = EXIT_FAILURE; + } else { + diag(" ✅ MySQL admin interface correctly NOT listening on port %d", test_case.mysql_admin_port); + } + } + + if (test_case.pgsql_admin_expected) { + diag(" Checking PgSQL admin interface on port %d (should be listening)...", test_case.pgsql_admin_port); + if (!check_port_listening(test_case.pgsql_admin_port)) { + diag(" ❌ PgSQL admin interface NOT listening on port %d for test: %s", + test_case.pgsql_admin_port, test_case.name.c_str()); + result = EXIT_FAILURE; + } else { + diag(" ✅ PgSQL admin interface IS listening on port %d", test_case.pgsql_admin_port); + } + } else { + diag(" Checking PgSQL admin interface on port %d (should NOT be listening)...", test_case.pgsql_admin_port); + if (check_port_listening(test_case.pgsql_admin_port, 1)) { + diag(" ❌ PgSQL admin interface 
unexpectedly listening on port %d for test: %s", + test_case.pgsql_admin_port, test_case.name.c_str()); + result = EXIT_FAILURE; + } else { + diag(" ✅ PgSQL admin interface correctly NOT listening on port %d", test_case.pgsql_admin_port); + } + } + + // Check if worker interfaces are listening as expected + if (test_case.mysql_worker_expected) { + diag(" Checking MySQL worker interface on port %d (should be listening)...", test_case.mysql_worker_port); + if (!check_port_listening(test_case.mysql_worker_port)) { + diag(" ❌ MySQL worker interface NOT listening on port %d for test: %s", + test_case.mysql_worker_port, test_case.name.c_str()); + result = EXIT_FAILURE; + } else { + diag(" ✅ MySQL worker interface IS listening on port %d", test_case.mysql_worker_port); + } + } else { + diag(" Checking MySQL worker interface on port %d (should NOT be listening)...", test_case.mysql_worker_port); + if (check_port_listening(test_case.mysql_worker_port, 1)) { + diag(" ❌ MySQL worker interface unexpectedly listening on port %d for test: %s", + test_case.mysql_worker_port, test_case.name.c_str()); + result = EXIT_FAILURE; + } else { + diag(" ✅ MySQL worker interface correctly NOT listening on port %d", test_case.mysql_worker_port); + } + } + + if (test_case.pgsql_worker_expected) { + diag(" Checking PgSQL worker interface on port %d (should be listening)...", test_case.pgsql_worker_port); + if (!check_port_listening(test_case.pgsql_worker_port)) { + diag(" ❌ PgSQL worker interface NOT listening on port %d for test: %s", + test_case.pgsql_worker_port, test_case.name.c_str()); + result = EXIT_FAILURE; + } else { + diag(" ✅ PgSQL worker interface IS listening on port %d", test_case.pgsql_worker_port); + } + } else { + diag(" Checking PgSQL worker interface on port %d (should NOT be listening)...", test_case.pgsql_worker_port); + if (check_port_listening(test_case.pgsql_worker_port, 1)) { + diag(" ❌ PgSQL worker interface unexpectedly listening on port %d for test: %s", + test_case.pgsql_worker_port, test_case.name.c_str()); + result = EXIT_FAILURE; + } else { + diag(" ✅ PgSQL worker interface correctly NOT listening on port %d", test_case.pgsql_worker_port); + } + } + + // Run PROXYSQL STOP/START tests if MySQL admin is enabled and all checks passed so far + if (test_case.mysql_admin_expected && result == EXIT_SUCCESS) { + diag(" Running PROXYSQL STOP/START tests for MySQL admin interface..."); + + // Configure STOP/START test with current test case name for better diagnostics + ProxySQLStopStartTestConfig stop_start_config; + stop_start_config.test_name_prefix = test_case.name + "_mysql_admin"; + stop_start_config.verbose_logging = true; // Enable detailed logging for debugging + + int stop_start_result = test_proxysql_stop_start_with_connection( + "127.0.0.1", "admin", "admin", test_case.mysql_admin_port, stop_start_config); + + if (stop_start_result == -1) { + diag(" ❌ PROXYSQL STOP/START tests failed for test case: %s", test_case.name.c_str()); + result = EXIT_FAILURE; + } else { + diag(" ✅ PROXYSQL STOP/START tests passed for test case: %s", test_case.name.c_str()); + // Note: The STOP/START tests already perform their own ok() calls internally + } + } + + // Force kill any remaining ProxySQL processes to ensure cleanup + diag(" Force killing any remaining ProxySQL processes..."); + string kill_cmd = "pkill -f \"proxysql.*" + string { cl.workdir } + "reg_test_4960_node_" + test_case.name + "\" 2>/dev/null || true"; + int force_kill_result = system(kill_cmd.c_str()); + (void)force_kill_result; // 
Suppress unused warning + sleep(1); + + // Additional cleanup - kill by global PID if needed + if (g_proxysql_pid > 0) { + diag(" Ensuring ProxySQL (PID: %d) is terminated...", g_proxysql_pid); + kill(g_proxysql_pid, SIGKILL); // Use SIGKILL to ensure termination + sleep(1); + int status; + waitpid(g_proxysql_pid, &status, WNOHANG); // Non-blocking wait + g_proxysql_pid = -1; // Reset global PID + } + + // Additional post-test cleanup for safety + int post_result1 = system(cleanup_mysql_admin.c_str()); + int post_result2 = system(cleanup_pgsql_admin.c_str()); + int post_result3 = system(cleanup_mysql_worker.c_str()); + int post_result4 = system(cleanup_pgsql_worker.c_str()); + (void)post_result1; (void)post_result2; (void)post_result3; (void)post_result4; // Suppress unused warnings + + diag(" Post-test cleanup completed"); + + return result; +} + +int main(int argc, char** argv) { + CommandLine cl; + + const char* WORKSPACE = getenv("WORKSPACE"); + + if (cl.getEnv() || WORKSPACE == nullptr) { + diag("Failed to get the required environmental variables."); + return EXIT_FAILURE; + } + + // Check for required system tools upfront + diag("Checking for required system tools..."); + int nc_check_result = system("which nc >/dev/null 2>&1"); + (void)nc_check_result; // Suppress unused result warning + if (nc_check_result != 0) { + diag("ERROR: 'nc' (netcat) command not found. Please install netcat to run this test."); + diag("On Ubuntu/Debian: sudo apt-get install netcat-openbsd"); + diag("On CentOS/RHEL: sudo yum install nc"); + diag("On Fedora: sudo dnf install nmap-ncat"); + plan(0); // Skip all tests + return exit_status(); + } + diag("Required tools found."); + + // Define test cases for all 16 combinations of 4 boolean variables: + // mysql-workers, pgsql-workers, mysql-admin, pgsql-admin + vector test_cases = { + // 0000: all disabled + { + "0000_all_disabled", + {"--mysql-workers", "false", "--pgsql-workers", "false", "--mysql-admin", "false", "--pgsql-admin", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_0000_all_disabled\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13750\"\n" + " pgsql_ifaces=\"127.0.0.1:13751\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13752\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13753\"\n" + "}\n", + 13750, 13751, 13752, 13753, true, false, false, false, false + }, + + // 0001: only pgsql-admin enabled + { + "0001_pgsql_admin_only", + {"--mysql-workers", "false", "--pgsql-workers", "false", "--mysql-admin", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_0001_pgsql_admin_only\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13754\"\n" + " pgsql_ifaces=\"127.0.0.1:13755\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13756\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13757\"\n" + "}\n", + 13754, 13755, 13756, 13757, true, false, true, false, false + }, + + // 0010: only mysql-admin enabled + { + "0010_mysql_admin_only", + {"--mysql-workers", "false", "--pgsql-workers", "false", "--pgsql-admin", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_0010_mysql_admin_only\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13758\"\n" + " pgsql_ifaces=\"127.0.0.1:13759\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " 
interfaces=\"127.0.0.1:13760\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13761\"\n" + "}\n", + 13758, 13759, 13760, 13761, true, true, false, false, false + }, + + // 0011: mysql-admin + pgsql-admin enabled + { + "0011_admin_only", + {"--mysql-workers", "false", "--pgsql-workers", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_0011_admin_only\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13762\"\n" + " pgsql_ifaces=\"127.0.0.1:13763\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13764\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13765\"\n" + "}\n", + 13762, 13763, 13764, 13765, true, true, true, false, false + }, + + // 0100: only pgsql-workers enabled + { + "0100_pgsql_workers_only", + {"--mysql-workers", "false", "--mysql-admin", "false", "--pgsql-admin", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_0100_pgsql_workers_only\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13766\"\n" + " pgsql_ifaces=\"127.0.0.1:13767\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13768\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13769\"\n" + "}\n", + 13766, 13767, 13768, 13769, true, false, false, false, true + }, + + // 0101: pgsql-workers + pgsql-admin enabled + { + "0101_pgsql_workers_admin", + {"--mysql-workers", "false", "--mysql-admin", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_0101_pgsql_workers_admin\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13770\"\n" + " pgsql_ifaces=\"127.0.0.1:13771\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13772\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13773\"\n" + "}\n", + 13770, 13771, 13772, 13773, true, false, true, false, true + }, + + // 0110: pgsql-workers + mysql-admin enabled + { + "0110_pgsql_workers_mysql_admin", + {"--mysql-workers", "false", "--pgsql-admin", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_0110_pgsql_workers_mysql_admin\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13774\"\n" + " pgsql_ifaces=\"127.0.0.1:13775\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13776\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13777\"\n" + "}\n", + 13774, 13775, 13776, 13777, true, true, false, false, true + }, + + // 0111: pgsql-workers + mysql-admin + pgsql-admin enabled + { + "0111_pgsql_workers_all_admin", + {"--mysql-workers", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_0111_pgsql_workers_all_admin\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13778\"\n" + " pgsql_ifaces=\"127.0.0.1:13779\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13780\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13781\"\n" + "}\n", + 13778, 13779, 13780, 13781, true, true, true, false, true + }, + + // 1000: only mysql-workers enabled + { + "1000_mysql_workers_only", + {"--pgsql-workers", "false", "--mysql-admin", "false", "--pgsql-admin", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_1000_mysql_workers_only\"\n\n" + 
"admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13782\"\n" + " pgsql_ifaces=\"127.0.0.1:13783\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13784\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13785\"\n" + "}\n", + 13782, 13783, 13784, 13785, true, false, false, true, false + }, + + // 1001: mysql-workers + pgsql-admin enabled + { + "1001_mysql_workers_pgsql_admin", + {"--pgsql-workers", "false", "--mysql-admin", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_1001_mysql_workers_pgsql_admin\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13786\"\n" + " pgsql_ifaces=\"127.0.0.1:13787\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13788\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13789\"\n" + "}\n", + 13786, 13787, 13788, 13789, true, false, true, true, false + }, + + // 1010: mysql-workers + mysql-admin enabled + { + "1010_mysql_workers_admin", + {"--pgsql-workers", "false", "--pgsql-admin", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_1010_mysql_workers_admin\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13790\"\n" + " pgsql_ifaces=\"127.0.0.1:13791\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13792\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13793\"\n" + "}\n", + 13790, 13791, 13792, 13793, true, true, false, true, false + }, + + // 1011: mysql-workers + mysql-admin + pgsql-admin enabled + { + "1011_mysql_workers_all_admin", + {"--pgsql-workers", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_1011_mysql_workers_all_admin\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13794\"\n" + " pgsql_ifaces=\"127.0.0.1:13795\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13796\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13797\"\n" + "}\n", + 13794, 13795, 13796, 13797, true, true, true, true, false + }, + + // 1100: both workers enabled, no admin + { + "1100_workers_only", + {"--mysql-admin", "false", "--pgsql-admin", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_1100_workers_only\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13798\"\n" + " pgsql_ifaces=\"127.0.0.1:13799\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13800\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13801\"\n" + "}\n", + 13798, 13799, 13800, 13801, true, false, false, true, true + }, + + // 1101: both workers + pgsql-admin enabled + { + "1101_workers_mysql_pgsql_admin", + {"--mysql-admin", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_1101_workers_mysql_pgsql_admin\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13802\"\n" + " pgsql_ifaces=\"127.0.0.1:13803\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13804\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13805\"\n" + "}\n", + 13802, 13803, 13804, 13805, true, false, true, true, true + }, + + // 1110: both workers + mysql-admin enabled + { + "1110_workers_mysql_admin", + {"--pgsql-admin", 
"false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_1110_workers_mysql_admin\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13806\"\n" + " pgsql_ifaces=\"127.0.0.1:13807\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13808\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13809\"\n" + "}\n", + 13806, 13807, 13808, 13809, true, true, false, true, true + }, + + // 1111: all enabled (default) + { + "1111_all_enabled", + {}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_node_1111_all_enabled\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:13810\"\n" + " pgsql_ifaces=\"127.0.0.1:13811\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13812\"\n" + "}\n\n" + "pgsql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:13813\"\n" + "}\n", + 13810, 13811, 13812, 13813, true, true, true, true, true + } + }; + + // Count test cases with MySQL admin enabled for STOP/START tests + int mysql_admin_tests = 0; + for (const auto& test_case : test_cases) { + if (test_case.mysql_admin_expected) { + mysql_admin_tests++; + } + } + + // Base tests + STOP/START tests (PROXYSQL_STOP_START_TEST_COUNT tests per MySQL admin case) + plan(static_cast(test_cases.size()) + (mysql_admin_tests * PROXYSQL_STOP_START_TEST_COUNT)); + diag("Running %d module startup tests + %d STOP/START tests (%d each for %d MySQL admin cases)", + static_cast(test_cases.size()), mysql_admin_tests * PROXYSQL_STOP_START_TEST_COUNT, PROXYSQL_STOP_START_TEST_COUNT, mysql_admin_tests); + + // Run all test cases + for (const auto& test_case : test_cases) { + diag("============================================================"); + int result = run_test_case(test_case, cl); + ok(result == EXIT_SUCCESS, "Test case '%s' %s", test_case.name.c_str(), + result == EXIT_SUCCESS ? "passed" : "failed"); + } + diag("============================================================"); + + return exit_status(); +} \ No newline at end of file diff --git a/test/tap/tests/reg_test_4960_monitor_modules-t.cpp b/test/tap/tests/reg_test_4960_monitor_modules-t.cpp new file mode 100644 index 0000000000..31822a93a6 --- /dev/null +++ b/test/tap/tests/reg_test_4960_monitor_modules-t.cpp @@ -0,0 +1,348 @@ +/** + * @file reg_test_4960_monitor_modules-t.cpp + * @brief TAP test for verifying monitor module enable/disable functionality from PR #4960. + * + * This test verifies that MySQL and PostgreSQL monitor modules can be enabled/disabled + * via CLI arguments and that their status is correctly reflected in the global_variables table. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mysql.h" +#include "mysqld_error.h" + +#include "proxysql_utils.h" +#include "tap.h" +#include "command_line.h" +#include "utils.h" +#include "test_proxysql_stop_query_handling.hpp" + +using std::string; +using std::vector; + +struct MonitorTestCase { + string name; + vector cli_args; + string config_content; + int mysql_admin_port; + int mysql_worker_port; + bool mysql_monitor_expected; + bool pgsql_monitor_expected; +}; + +int connect_to_proxysql_admin(int port, MYSQL*& mysql) { + mysql = mysql_init(NULL); + if (!mysql) { + diag("MySQL initialization failed"); + return -1; + } + + // Set connection timeout + unsigned int timeout = 5; + mysql_options(mysql, MYSQL_OPT_CONNECT_TIMEOUT, &timeout); + mysql_options(mysql, MYSQL_OPT_READ_TIMEOUT, &timeout); + mysql_options(mysql, MYSQL_OPT_WRITE_TIMEOUT, &timeout); + + // Connect to ProxySQL admin + if (!mysql_real_connect(mysql, "127.0.0.1", "admin", "admin", NULL, port, NULL, 0)) { + diag("Failed to connect to ProxySQL admin on port %d: %s", port, mysql_error(mysql)); + mysql_close(mysql); + mysql = NULL; + return -1; + } + + return 0; +} + +int check_monitor_status(MYSQL* mysql, const string& monitor_name, bool expected_enabled) { + string query = "SELECT variable_value FROM global_variables WHERE variable_name = '" + monitor_name + "'"; + + int query_result = mysql_query(mysql, query.c_str()); + if (query_result != 0) { + diag("Failed to execute query for %s: %s", monitor_name.c_str(), mysql_error(mysql)); + return -1; + } + + MYSQL_RES* result = mysql_store_result(mysql); + if (!result) { + diag("Failed to store result for %s: %s", monitor_name.c_str(), mysql_error(mysql)); + return -1; + } + + MYSQL_ROW row = mysql_fetch_row(result); + if (!row) { + diag("No result found for %s", monitor_name.c_str()); + mysql_free_result(result); + return -1; + } + + string variable_value = row[0] ? row[0] : ""; + bool actual_enabled = (variable_value == "true"); + + mysql_free_result(result); + + if (actual_enabled != expected_enabled) { + diag("Monitor status mismatch for %s: expected %s, got %s", + monitor_name.c_str(), + expected_enabled ? "true" : "false", + actual_enabled ? 
"true" : "false"); + return 1; + } + + return 0; +} + +int launch_proxysql_instance(const MonitorTestCase& test_case, const CommandLine& cl) { + const string test_datadir = string { cl.workdir } + "reg_test_4960_monitor_" + test_case.name; + const string test_config_file = test_datadir + "/proxysql.cfg"; + + // Clean up existing datadir if it exists + string cleanup_cmd = "rm -rf " + test_datadir; + int cleanup_result = system(cleanup_cmd.c_str()); + (void)cleanup_result; + + // Create test datadir + string mkdir_cmd = "mkdir -p " + test_datadir; + int mkdir_result = system(mkdir_cmd.c_str()); + (void)mkdir_result; + + // Create config file + std::ofstream config_file(test_config_file); + config_file << test_case.config_content; + config_file.close(); + + // Build command to start ProxySQL + const string proxysql_path { string { getenv("WORKSPACE") } + "/src/proxysql" }; + string cmd = proxysql_path + " -f -c " + test_config_file; + + // Add CLI arguments + for (const auto& arg : test_case.cli_args) { + cmd += " " + string(arg); + } + + // Create log file path + const string log_file = test_datadir + "/proxysql.log"; + + // Start ProxySQL in background with output redirected to log file + diag(" Starting ProxySQL with output redirected to %s", log_file.c_str()); + string full_cmd = cmd + " > " + log_file + " 2>&1 &"; + int start_result = system(full_cmd.c_str()); + (void)start_result; + + // Wait a bit for startup + sleep(5); + + return EXIT_SUCCESS; +} + +int run_monitor_test_case(const MonitorTestCase& test_case, const CommandLine& cl) { + int result = EXIT_SUCCESS; + + diag("Running monitor test case: %s", test_case.name.c_str()); + diag(" Expected MySQL monitor: %s", test_case.mysql_monitor_expected ? "YES" : "NO"); + diag(" Expected PgSQL monitor: %s", test_case.pgsql_monitor_expected ? 
"YES" : "NO"); + + // Display CLI arguments if any + if (!test_case.cli_args.empty()) { + diag(" CLI arguments:"); + for (size_t i = 0; i < test_case.cli_args.size(); i++) { + diag(" %s", test_case.cli_args[i]); + } + } + + // Launch ProxySQL instance + if (launch_proxysql_instance(test_case, cl) != EXIT_SUCCESS) { + diag("Failed to launch ProxySQL for test case: %s", test_case.name.c_str()); + return EXIT_FAILURE; + } + + // Wait for ProxySQL to be ready using the standard approach + diag(" Waiting for ProxySQL admin interface to be ready..."); + conn_opts_t conn_opts {}; + conn_opts.host = "127.0.0.1"; + conn_opts.port = test_case.mysql_admin_port; + conn_opts.user = "admin"; + conn_opts.pass = "admin"; + + MYSQL* mysql = wait_for_proxysql(conn_opts, 15); + if (mysql == nullptr) { + diag(" ❌ Failed to connect to ProxySQL admin interface after 15 seconds"); + result = EXIT_FAILURE; + } else { + diag(" ✅ Connected to admin interface"); + + // Check MySQL monitor status + diag(" Checking MySQL monitor status..."); + int mysql_result = check_monitor_status(mysql, "mysql-monitor_enabled", test_case.mysql_monitor_expected); + if (mysql_result == 0) { + diag(" ✅ MySQL monitor status correct"); + } else if (mysql_result == 1) { + diag(" ❌ MySQL monitor status incorrect"); + result = EXIT_FAILURE; + } else { + diag(" ❌ Error checking MySQL monitor status"); + result = EXIT_FAILURE; + } + + // Check PgSQL monitor status + diag(" Checking PgSQL monitor status..."); + int pgsql_result = check_monitor_status(mysql, "pgsql-monitor_enabled", test_case.pgsql_monitor_expected); + if (pgsql_result == 0) { + diag(" ✅ PgSQL monitor status correct"); + } else if (pgsql_result == 1) { + diag(" ❌ PgSQL monitor status incorrect"); + result = EXIT_FAILURE; + } else { + diag(" ❌ Error checking PgSQL monitor status"); + result = EXIT_FAILURE; + } + + mysql_close(mysql); + + // Run PROXYSQL STOP/START tests since we have MySQL admin interface and all checks passed + if (result == EXIT_SUCCESS) { + diag(" Running PROXYSQL STOP/START tests for monitor test case..."); + + // Configure STOP/START test with current test case name for better diagnostics + ProxySQLStopStartTestConfig stop_start_config; + stop_start_config.test_name_prefix = test_case.name + "_monitor"; + stop_start_config.verbose_logging = true; // Enable detailed logging for debugging + + int stop_start_result = test_proxysql_stop_start_with_connection( + "127.0.0.1", "admin", "admin", test_case.mysql_admin_port, stop_start_config); + + if (stop_start_result == -1) { + diag(" ❌ PROXYSQL STOP/START tests failed for monitor test case: %s", test_case.name.c_str()); + result = EXIT_FAILURE; + } else { + diag(" ✅ PROXYSQL STOP/START tests passed for monitor test case: %s", test_case.name.c_str()); + // Note: The STOP/START tests already perform their own ok() calls internally + } + } + } + + // Force cleanup + string kill_cmd = "pkill -f \"proxysql.*" + string { cl.workdir } + "reg_test_4960_monitor_" + test_case.name + "\" 2>/dev/null || true"; + int kill_result = system(kill_cmd.c_str()); + (void)kill_result; + + // Cleanup ports + string cleanup_admin = "fuser -k " + std::to_string(test_case.mysql_admin_port) + "/tcp 2>/dev/null || true"; + string cleanup_worker = "fuser -k " + std::to_string(test_case.mysql_worker_port) + "/tcp 2>/dev/null || true"; + int cleanup_admin_result = system(cleanup_admin.c_str()); + int cleanup_worker_result = system(cleanup_worker.c_str()); + (void)cleanup_admin_result; + (void)cleanup_worker_result; + + diag(" Monitor test 
completed"); + + return result; +} + +int main(int argc, char** argv) { + CommandLine cl; + + const char* WORKSPACE = getenv("WORKSPACE"); + + if (cl.getEnv() || WORKSPACE == nullptr) { + diag("Failed to get the required environmental variables."); + return EXIT_FAILURE; + } + + // Define monitor test cases - 4 combinations of monitor enable/disable + vector test_cases = { + // Test 1: Both monitors enabled (default) + { + "both_enabled", + {}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_monitor_both_enabled\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:14050\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:14051\"\n" + "}\n", + 14050, 14051, true, true + }, + + // Test 2: MySQL monitor disabled, PgSQL monitor enabled + { + "mysql_disabled", + {"--mysql-monitor", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_monitor_mysql_disabled\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:14052\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:14053\"\n" + "}\n", + 14052, 14053, false, true + }, + + // Test 3: MySQL monitor enabled, PgSQL monitor disabled + { + "pgsql_disabled", + {"--pgsql-monitor", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_monitor_pgsql_disabled\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:14054\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:14055\"\n" + "}\n", + 14054, 14055, true, false + }, + + // Test 4: Both monitors disabled + { + "both_disabled", + {"--mysql-monitor", "false", "--pgsql-monitor", "false"}, + string { "datadir=\"" } + cl.workdir + "reg_test_4960_monitor_both_disabled\"\n\n" + "admin_variables=\n" + "{\n" + " admin_credentials=\"admin:admin\"\n" + " mysql_ifaces=\"127.0.0.1:14056\"\n" + "}\n\n" + "mysql_variables=\n" + "{\n" + " interfaces=\"127.0.0.1:14057\"\n" + "}\n", + 14056, 14057, false, false + } + }; + + // All monitor tests use MySQL admin interface, so all get STOP/START tests + // Base tests + STOP/START tests (PROXYSQL_STOP_START_TEST_COUNT tests each) + plan(static_cast(test_cases.size()) + (static_cast(test_cases.size()) * PROXYSQL_STOP_START_TEST_COUNT)); + diag("Running %d monitor tests + %d STOP/START tests (%d each for %d test cases)", + static_cast(test_cases.size()), static_cast(test_cases.size()) * PROXYSQL_STOP_START_TEST_COUNT, PROXYSQL_STOP_START_TEST_COUNT, static_cast(test_cases.size())); + + // Run all monitor test cases + for (const auto& test_case : test_cases) { + diag("============================================================"); + int result = run_monitor_test_case(test_case, cl); + ok(result == EXIT_SUCCESS, "Monitor test case '%s' %s", test_case.name.c_str(), + result == EXIT_SUCCESS ? "passed" : "failed"); + } + diag("============================================================"); + + return exit_status(); +} \ No newline at end of file diff --git a/test/tap/tests/test_proxysql_stop_query_handling-t.cpp b/test/tap/tests/test_proxysql_stop_query_handling-t.cpp new file mode 100644 index 0000000000..6ab8ef1024 --- /dev/null +++ b/test/tap/tests/test_proxysql_stop_query_handling-t.cpp @@ -0,0 +1,31 @@ +/** + * @file test_proxysql_stop_query_handling-t.cpp + * @brief This test verifies PROXYSQL STOP query handling fix for issue 5186. + * Tests that admin queries are properly handled during STOP state. 
+ * This is a wrapper around the shared test functions. + * @date 2025-11-23 + */ + +#include "test_proxysql_stop_query_handling.hpp" + +int main(int argc, char** argv) { + CommandLine cl; + + if (cl.getEnv()) { + diag("Failed to get the required environmental variables."); + return -1; + } + + // We expect PROXYSQL_STOP_START_TEST_COUNT test cases from the shared test function + plan(PROXYSQL_STOP_START_TEST_COUNT); + + int result = test_proxysql_stop_start_with_connection(cl.host, cl.admin_username, + cl.admin_password, cl.admin_port); + + if (result == -1) { + diag("Failed to connect to ProxySQL admin or critical test failure"); + return exit_status(); + } + + return exit_status(); +} diff --git a/test/tap/tests/test_proxysql_stop_query_handling.hpp b/test/tap/tests/test_proxysql_stop_query_handling.hpp new file mode 100644 index 0000000000..aa2c0fb721 --- /dev/null +++ b/test/tap/tests/test_proxysql_stop_query_handling.hpp @@ -0,0 +1,208 @@ +/** + * @file test_proxysql_stop_query_handling.hpp + * @brief Shared header for PROXYSQL STOP/START query handling tests. + * Provides reusable functions to test STOP/START behavior with various module configurations. + * @date 2025-01-24 + */ + +#ifndef TEST_PROXYSQL_STOP_QUERY_HANDLING_HPP +#define TEST_PROXYSQL_STOP_QUERY_HANDLING_HPP + +// Number of individual test cases performed by test_proxysql_stop_start_handling() +// Update this value if you add/remove tests in the function +#define PROXYSQL_STOP_START_TEST_COUNT 13 + +#include &lt;cstdio&gt; +#include &lt;string&gt; +#include &lt;unistd.h&gt; + +#include "mysql.h" +#include "mysqld_error.h" + +#include "tap.h" +#include "command_line.h" +#include "utils.h" + +using std::string; + +struct ProxySQLStopStartTestConfig { + string test_name_prefix; + int sleep_after_stop; + int sleep_after_start; + bool verbose_logging; + + ProxySQLStopStartTestConfig() : + test_name_prefix(""), + sleep_after_stop(2), + sleep_after_start(3), + verbose_logging(false) {} +}; + +// Helper function to execute query and check if it succeeds +bool execute_query_succeeds(MYSQL* mysql, const string& query) { + if (mysql_query(mysql, query.c_str()) == 0) { + mysql_free_result(mysql_store_result(mysql)); + return true; + } + return false; +} + +// Helper function to execute query and check if it fails as expected +bool execute_query_fails(MYSQL* mysql, const string& query, const string& expected_error_substring = "") { + int rc = mysql_query(mysql, query.c_str()); + if (rc != 0) { + string error = mysql_error(mysql); + if (expected_error_substring.empty() || error.find(expected_error_substring) != string::npos) { + return true; // Failed as expected + } + } + return false; // Should have failed but didn't +} + +// Helper function to get row count from a query +int get_row_count(MYSQL* mysql, const string& query) { + if (mysql_query(mysql, query.c_str()) == 0) { + MYSQL_RES* result = mysql_store_result(mysql); + if (result) { + int count = mysql_num_rows(result); + mysql_free_result(result); + return count; + } + } + return -1; +} + +/** + * @brief Tests PROXYSQL STOP/START functionality with a connected admin interface + * + * This function performs the complete test sequence: + * 1. Execute PROXYSQL STOP + * 2. Test queries that work with null pointer protection during STOP state + * 3. Test modification queries during STOP state + * 4. Test basic queries that should succeed during STOP state + * 5. Execute PROXYSQL START + * 6. 
Verify queries continue to work after START + * + * @param admin_mysql Connected MYSQL admin interface + * @param config Test configuration options + * @return int Number of tests performed (PROXYSQL_STOP_START_TEST_COUNT), or -1 if critical failure + */ +int test_proxysql_stop_start_handling(MYSQL* admin_mysql, const ProxySQLStopStartTestConfig& config = ProxySQLStopStartTestConfig()) { + string test_prefix = config.test_name_prefix.empty() ? "" : config.test_name_prefix + " - "; + + // === TEST 1: Execute PROXYSQL STOP === + bool stop_success = execute_query_succeeds(admin_mysql, "PROXYSQL STOP"); + ok(stop_success, "%sPROXYSQL STOP command should succeed", test_prefix.c_str()); + + if (!stop_success) { + diag("%sPROXYSQL STOP failed, cannot continue with remaining tests", test_prefix.c_str()); + return -1; + } + + // Give some time for STOP to complete + sleep(config.sleep_after_stop); + + // === TESTS 2-5: Test queries that work with null pointer protection during STOP state === + + // TEST 2: runtime_mysql_query_rules should work normally with null pointer protection + int row_count = get_row_count(admin_mysql, "SELECT COUNT(*) FROM runtime_mysql_query_rules"); + ok(row_count >= 0, "%sruntime_mysql_query_rules should return valid count during STOP state, actual: %d", test_prefix.c_str(), row_count); + + // TEST 3: runtime_mysql_query_rules_fast_routing should work normally with null pointer protection + row_count = get_row_count(admin_mysql, "SELECT COUNT(*) FROM runtime_mysql_query_rules_fast_routing"); + ok(row_count >= 0, "%sruntime_mysql_query_rules_fast_routing should return valid count during STOP state, actual: %d", test_prefix.c_str(), row_count); + + // TEST 4: runtime_mysql_users should work normally with null pointer protection + row_count = get_row_count(admin_mysql, "SELECT COUNT(*) FROM runtime_mysql_users"); + ok(row_count >= 0, "%sruntime_mysql_users should return valid count during STOP state, actual: %d", test_prefix.c_str(), row_count); + + // TEST 5: stats_mysql_query_digest should work normally with null pointer protection + row_count = get_row_count(admin_mysql, "SELECT COUNT(*) FROM stats_mysql_query_digest"); + ok(row_count >= 0, "%sstats_mysql_query_digest should return valid count during STOP state, actual: %d", test_prefix.c_str(), row_count); + + // === TEST 6: Test modification queries during STOP state === + + // TEST 6: LOAD MYSQL USERS TO RUNTIME should succeed (MySQL Auth module is loaded) + bool load_users_success = execute_query_succeeds(admin_mysql, "LOAD MYSQL USERS TO RUNTIME"); + ok(load_users_success, "%sLOAD MYSQL USERS TO RUNTIME should succeed during STOP state", test_prefix.c_str()); + + // TEST 7: LOAD MYSQL QUERY RULES TO RUNTIME should fail (Query Processor not started) + bool load_rules_fails = execute_query_fails(admin_mysql, "LOAD MYSQL QUERY RULES TO RUNTIME", "Global Query Processor not started"); + ok(load_rules_fails, "%sLOAD MYSQL QUERY RULES TO RUNTIME should fail during STOP state", test_prefix.c_str()); + + // === TESTS 8-11: Test queries that should SUCCEED during STOP state === + + // TEST 8: Basic arithmetic query should work + bool basic_query_success = execute_query_succeeds(admin_mysql, "SELECT 1+1"); + ok(basic_query_success, "%sBasic arithmetic query (SELECT 1+1) should work during STOP state", test_prefix.c_str()); + + // TEST 9: Version query should work + bool version_success = execute_query_succeeds(admin_mysql, "SELECT @@version"); + ok(version_success, "%sVersion query should work during STOP state", 
test_prefix.c_str()); + + // TEST 10: SHOW PROMETHEUS METRICS should work (existing functionality) + bool prometheus_success = execute_query_succeeds(admin_mysql, "SHOW PROMETHEUS METRICS"); + ok(prometheus_success, "%sSHOW PROMETHEUS METRICS should work during STOP state", test_prefix.c_str()); + + // TEST 11: Basic SELECT should work + bool db_select_success = execute_query_succeeds(admin_mysql, "SELECT DATABASE()"); + bool user_select_success = execute_query_succeeds(admin_mysql, "SELECT USER()"); + ok(db_select_success && user_select_success, "%sBasic SELECT (DATABASE() and USER()) should work during STOP state", test_prefix.c_str()); + + // === TEST 12: Execute PROXYSQL START === + bool start_success = execute_query_succeeds(admin_mysql, "PROXYSQL START"); + ok(start_success, "%sPROXYSQL START command should succeed", test_prefix.c_str()); + + if (!start_success) { + diag("%sPROXYSQL START failed, cannot continue with final test", test_prefix.c_str()); + return -1; + } + + // Give some time for START to complete + sleep(config.sleep_after_start); + + // === TEST 13: Test that queries continue to work after START === + + // After START, runtime queries should continue to work normally + row_count = get_row_count(admin_mysql, "SELECT COUNT(*) FROM runtime_mysql_query_rules"); + ok(row_count >= 0, "%sruntime_mysql_query_rules should continue to work after START, rows: %d", test_prefix.c_str(), row_count); + + return PROXYSQL_STOP_START_TEST_COUNT; // Total number of tests performed +} + +/** + * @brief Connects to ProxySQL admin interface and runs STOP/START tests + * + * @param host ProxySQL host + * @param admin_username Admin username + * @param admin_password Admin password + * @param admin_port Admin port + * @param config Test configuration + * @return int Number of tests performed, or -1 if connection failed + */ +int test_proxysql_stop_start_with_connection(const string& host, const string& admin_username, + const string& admin_password, int admin_port, + const ProxySQLStopStartTestConfig& config = ProxySQLStopStartTestConfig()) { + MYSQL* proxysql_admin = mysql_init(NULL); + if (!proxysql_admin) { + fprintf(stderr, "File %s, line %d, Error: MySQL initialization failed\n", __FILE__, __LINE__); + return -1; + } + + // Connect to ProxySQL admin interface + if (!mysql_real_connect(proxysql_admin, host.c_str(), admin_username.c_str(), + admin_password.c_str(), NULL, admin_port, NULL, 0)) { + fprintf(stderr, "File %s, line %d, Error: %s\n", __FILE__, __LINE__, mysql_error(proxysql_admin)); + mysql_close(proxysql_admin); + return -1; + } + + int result = test_proxysql_stop_start_handling(proxysql_admin, config); + + mysql_close(proxysql_admin); + return result; +} + +#endif // TEST_PROXYSQL_STOP_QUERY_HANDLING_HPP \ No newline at end of file