CREATE SCHEMA "Mx Regular User";
SET search_path TO "Mx Regular User";

-- add the coordinator in an idempotent way
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
-- sync the metadata to both nodes
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
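-- an optional sanity check: the sync status is reflected in
-- pg_dist_node.hasmetadata, which this test also relies on further below
SELECT nodeport, hasmetadata FROM pg_dist_node ORDER BY nodeport;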

-- create a role and give it access on each node separately,
-- and raise the message level so that community and enterprise
-- output do not diverge
SET client_min_messages TO ERROR;
SET citus.enable_ddl_propagation TO OFF;
CREATE USER regular_mx_user WITH LOGIN;
SELECT 1 FROM run_command_on_workers($$CREATE USER regular_mx_user WITH LOGIN;$$);
GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user;

-- create another table owned by the super user (i.e., the current user of the session)
-- and GRANT access to the regular user
CREATE SCHEMA "Mx Super User";
SELECT 1 FROM run_command_on_workers($$CREATE SCHEMA "Mx Super User";$$);
SET citus.next_shard_id TO 2980000;
SET search_path TO "Mx Super User";
CREATE TABLE super_user_owned_regular_user_granted (a int PRIMARY KEY, b int);
SELECT create_reference_table('"Mx Super User".super_user_owned_regular_user_granted');
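
-- an optional sanity check: reference tables appear in pg_dist_partition
-- with partition method 'n' (none)
SELECT partmethod FROM pg_dist_partition WHERE logicalrelid = '"Mx Super User".super_user_owned_regular_user_granted'::regclass;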

-- show that this table is owned by the super user
SELECT
	rolsuper
FROM
	pg_roles
WHERE
	oid IN (SELECT relowner FROM pg_class WHERE oid = '"Mx Super User".super_user_owned_regular_user_granted'::regclass);

-- make sure that granting produces the same output for both community and enterprise
SET client_min_messages TO ERROR;
GRANT USAGE ON SCHEMA "Mx Super User" TO regular_mx_user;
GRANT INSERT ON TABLE super_user_owned_regular_user_granted TO regular_mx_user;

SELECT 1 FROM run_command_on_workers($$GRANT USAGE ON SCHEMA "Mx Super User" TO regular_mx_user;$$);
SELECT 1 FROM run_command_on_workers($$GRANT INSERT ON TABLE "Mx Super User".super_user_owned_regular_user_granted TO regular_mx_user;$$);
SELECT 1 FROM run_command_on_placements('super_user_owned_regular_user_granted', $$GRANT INSERT ON TABLE %s TO regular_mx_user;$$);
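
-- an optional check using plain PostgreSQL: has_table_privilege() should now
-- report INSERT for the regular user, but not yet UPDATE
SELECT has_table_privilege('regular_mx_user', 'super_user_owned_regular_user_granted', 'INSERT');
SELECT has_table_privilege('regular_mx_user', 'super_user_owned_regular_user_granted', 'UPDATE');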

-- now that the GRANT is given, the regular user should be able to
-- INSERT into the table
\c - regular_mx_user - :master_port
SET search_path TO "Mx Super User";
COPY super_user_owned_regular_user_granted FROM STDIN WITH CSV;
1,1
2,1
\.

-- however, this specific user doesn't have UPDATE/UPSERT/DELETE/TRUNCATE
-- permissions, so the following should fail
INSERT INTO super_user_owned_regular_user_granted VALUES (1, 1), (2, 1) ON CONFLICT (a) DO NOTHING;
TRUNCATE super_user_owned_regular_user_granted;
DELETE FROM super_user_owned_regular_user_granted;
UPDATE super_user_owned_regular_user_granted SET a = 1;

-- AccessExclusiveLock == 8 is strictly forbidden for any user
SELECT lock_shard_resources(8, ARRAY[2980000]);

-- ExclusiveLock == 7 is forbidden for this user,
-- as the user only has INSERT rights
SELECT lock_shard_resources(7, ARRAY[2980000]);
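
-- (for reference: the lock level integers passed to lock_shard_resources()
-- follow PostgreSQL's standard lock mode numbering, i.e. 1 == AccessShareLock
-- up to 8 == AccessExclusiveLock, matching the comments in this file)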

-- but should be able to acquire RowExclusiveLock
BEGIN;
	SELECT count(*) > 0 AS acquired_lock FROM pg_locks WHERE pid = pg_backend_pid() AND locktype = 'advisory';
	SELECT lock_shard_resources(3, ARRAY[2980000]);
	SELECT count(*) > 0 AS acquired_lock FROM pg_locks WHERE pid = pg_backend_pid() AND locktype = 'advisory';
COMMIT;

-- acquiring locks on non-existent shards is not meaningful, but we still do not
-- throw an error, as we might be in the middle of metadata syncing; we simply
-- do not acquire the locks
BEGIN;
	SELECT count(*) > 0 AS acquired_lock FROM pg_locks WHERE pid = pg_backend_pid() AND locktype = 'advisory';
	SELECT lock_shard_resources(3, ARRAY[123456871]);
	SELECT count(*) > 0 AS acquired_lock FROM pg_locks WHERE pid = pg_backend_pid() AND locktype = 'advisory';
COMMIT;

\c - postgres - :master_port
SET search_path TO "Mx Super User";
SET client_min_messages TO ERROR;

-- now allow the user to UPDATE the table
GRANT UPDATE ON TABLE super_user_owned_regular_user_granted TO regular_mx_user;
SELECT 1 FROM run_command_on_workers($$GRANT UPDATE ON TABLE "Mx Super User".super_user_owned_regular_user_granted TO regular_mx_user;$$);
SELECT 1 FROM run_command_on_placements('super_user_owned_regular_user_granted', $$GRANT UPDATE ON TABLE %s TO regular_mx_user;$$);

\c - regular_mx_user - :master_port
SET search_path TO "Mx Super User";

UPDATE super_user_owned_regular_user_granted SET b = 1;

-- AccessExclusiveLock == 8 is strictly forbidden for any user,
-- even after UPDATE is allowed
SELECT lock_shard_resources(8, ARRAY[2980000]);

\c - postgres - :master_port
SET client_min_messages TO ERROR;
DROP SCHEMA "Mx Super User" CASCADE;

\c - postgres - :worker_1_port
SET client_min_messages TO ERROR;
SET citus.enable_ddl_propagation TO OFF;
CREATE SCHEMA "Mx Regular User";
GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user;

\c - postgres - :worker_2_port
SET client_min_messages TO ERROR;
SET citus.enable_ddl_propagation TO OFF;
CREATE SCHEMA "Mx Regular User";
GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user;

-- now connect with that user
\c - regular_mx_user - :master_port
SET search_path TO "Mx Regular User";
SET citus.next_shard_id TO 1560000;

-- make sure that we sync the metadata
SET citus.shard_replication_factor TO 1;

CREATE TABLE partitioned_table (
	long_column_names_1 int, long_column_names_2 int, long_column_names_3 int,
	long_column_names_4 int, long_column_names_5 int, long_column_names_6 int,
	long_column_names_7 int, long_column_names_8 int, long_column_names_9 int,
	long_column_names_10 int, long_column_names_11 timestamp
) PARTITION BY RANGE (long_column_names_11);
CREATE TABLE very_long_child_partition_name_is_required_to_repro_the_bug PARTITION OF partitioned_table FOR VALUES FROM ('2011-01-01') TO ('2012-01-01');

SELECT create_distributed_table('partitioned_table', 'long_column_names_1');
SELECT bool_and(hasmetadata) FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport IN (:worker_1_port, :worker_2_port);
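
-- optionally, the shard metadata created above can be inspected via pg_dist_shard
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'partitioned_table'::regclass;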

-- show that we can roll back
BEGIN;
	CREATE INDEX long_index_on_parent_table ON partitioned_table (long_column_names_1, long_column_names_2, long_column_names_3, long_column_names_4, long_column_names_5, long_column_names_6, long_column_names_11) INCLUDE (long_column_names_7, long_column_names_8, long_column_names_9, long_column_names_10);
ROLLBACK;

-- show that we can switch to sequential mode and still
-- sync the metadata to the nodes
BEGIN;
	CREATE INDEX long_index_on_parent_table ON partitioned_table (long_column_names_1, long_column_names_2, long_column_names_3, long_column_names_4, long_column_names_5, long_column_names_6, long_column_names_11) INCLUDE (long_column_names_7, long_column_names_8, long_column_names_9, long_column_names_10);
	SHOW citus.multi_shard_modify_mode;
COMMIT;
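
-- (a note on the SHOW above: presumably it reports 'sequential' here, since
-- Citus switches to sequential execution when shard-level index names would be
-- truncated, so that the metadata can still be synced consistently)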

-- make sure that partitioned tables, columnar, and conversion to columnar work fine
-- on Citus MX with a non-super user
CREATE SEQUENCE my_mx_seq;
CREATE TABLE users_table_part(col_to_drop int, user_id int, value_1 int, value_2 bigint DEFAULT nextval('my_mx_seq'::regclass), value_3 bigserial) PARTITION BY RANGE (value_1);
CREATE TABLE users_table_part_0 PARTITION OF users_table_part FOR VALUES FROM (0) TO (1);
CREATE TABLE users_table_part_1 PARTITION OF users_table_part FOR VALUES FROM (1) TO (2);
SELECT create_distributed_table('users_table_part', 'user_id', colocate_with:='partitioned_table');

-- make sure that we can handle dropped columns nicely
ALTER TABLE users_table_part DROP COLUMN col_to_drop;

INSERT INTO users_table_part SELECT i, i % 2, i % 50 FROM generate_series(0, 100) i;

BEGIN;
	-- make sure to use multiple connections
	SET LOCAL citus.force_max_query_parallelization TO ON;

	CREATE TABLE users_table_part_2 PARTITION OF users_table_part FOR VALUES FROM (2) TO (3);
	INSERT INTO users_table_part SELECT i, i % 3, i % 50 FROM generate_series(0, 100) i;

	CREATE TABLE users_table_part_3 (user_id int, value_1 int, value_2 bigint, value_3 bigserial);
	ALTER TABLE users_table_part ATTACH PARTITION users_table_part_3 FOR VALUES FROM (3) TO (4);
	CREATE TABLE users_table_part_4 PARTITION OF users_table_part FOR VALUES FROM (4) TO (5) USING COLUMNAR;
COMMIT;

SELECT alter_table_set_access_method('users_table_part_0', 'columnar');
SELECT alter_table_set_access_method('users_table_part_0', 'heap');
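
-- an optional check via the plain catalogs: the table's current access method
SELECT amname FROM pg_am WHERE oid = (SELECT relam FROM pg_class WHERE relname = 'users_table_part_0');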

BEGIN;
	SET LOCAL citus.force_max_query_parallelization TO ON;
	SELECT alter_table_set_access_method('users_table_part_0', 'columnar');
	SELECT alter_table_set_access_method('users_table_part_0', 'heap');
ROLLBACK;

BEGIN;
	SET LOCAL citus.force_max_query_parallelization TO ON;
	SELECT undistribute_table('users_table_part');
	SELECT create_distributed_table('users_table_part', 'user_id');
COMMIT;

BEGIN;
	-- make sure to use multiple connections
	SET LOCAL citus.force_max_query_parallelization TO ON;
	SELECT alter_distributed_table('users_table_part', shard_count:=9, cascade_to_colocated:=false);
ROLLBACK;

BEGIN;
	-- make sure to use multiple connections
	SET LOCAL citus.force_max_query_parallelization TO ON;
	ALTER TABLE users_table_part ADD COLUMN my_column INT DEFAULT 15;
	CREATE INDEX test_index ON users_table_part(value_3, value_2);
	CREATE INDEX test_index_on_child ON users_table_part_3(value_3, value_2);
ROLLBACK;

CREATE TABLE local_table_in_the_metadata (id int PRIMARY KEY, value_1 int);

CREATE TABLE reference_table(id int PRIMARY KEY, value_1 int);
SELECT create_reference_table('reference_table');

CREATE TABLE on_delete_fkey_table(id int PRIMARY KEY, value_1 int);
SELECT create_distributed_table('on_delete_fkey_table', 'id', colocate_with:='partitioned_table');
ALTER TABLE reference_table ADD CONSTRAINT fkey_to_local FOREIGN KEY(id) REFERENCES local_table_in_the_metadata(id);
ALTER TABLE on_delete_fkey_table ADD CONSTRAINT veerrrrrrryyy_veerrrrrrryyy_veerrrrrrryyy_long_constraint_name FOREIGN KEY(value_1) REFERENCES reference_table(id) ON DELETE CASCADE;
INSERT INTO local_table_in_the_metadata SELECT i, i FROM generate_series(0, 100) i;
INSERT INTO reference_table SELECT i, i FROM generate_series(0, 100) i;
INSERT INTO on_delete_fkey_table SELECT i, i % 100 FROM generate_series(0, 1000) i;
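
-- a minimal illustration of the ON DELETE CASCADE above, wrapped in a rolled-back
-- transaction so the data stays intact: deleting a referenced row should also
-- remove the rows referencing it, leaving a count of 0
BEGIN;
	DELETE FROM reference_table WHERE id = 0;
	SELECT count(*) FROM on_delete_fkey_table WHERE value_1 = 0;
ROLLBACK;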

-- make sure that we can handle switching to sequential execution nicely
-- on MX with a regular user
BEGIN;
	DELETE FROM reference_table WHERE id > 50;
	SHOW citus.multi_shard_modify_mode;
	ALTER TABLE on_delete_fkey_table ADD COLUMN t int DEFAULT 10;
	SELECT avg(t) FROM on_delete_fkey_table;
ROLLBACK;

-- make sure to use multiple connections per node
SET citus.force_max_query_parallelization TO ON;
CREATE INDEX CONCURRENTLY concurrent_index_test ON on_delete_fkey_table(id);
CREATE UNIQUE INDEX unique_key_example ON on_delete_fkey_table(id, value_1);
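
-- optional: both indexes should now be listed for the table
SELECT indexname FROM pg_indexes WHERE tablename = 'on_delete_fkey_table' ORDER BY indexname;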

BEGIN;
	TRUNCATE local_table_in_the_metadata, reference_table, on_delete_fkey_table;
	SELECT count(*) FROM local_table_in_the_metadata;
	SELECT count(*) FROM reference_table;
	SELECT count(*) FROM on_delete_fkey_table;
ROLLBACK;

BEGIN;
	SET citus.multi_shard_modify_mode TO 'sequential';
	TRUNCATE on_delete_fkey_table CASCADE;
	TRUNCATE reference_table CASCADE;
	SELECT count(*) FROM local_table_in_the_metadata JOIN reference_table USING(id) JOIN on_delete_fkey_table USING(id);
ROLLBACK;

-- join involving local, reference, and distributed tables
SELECT count(*) FROM local_table_in_the_metadata JOIN reference_table USING(id) JOIN on_delete_fkey_table USING(id);

-- query with intermediate results
WITH cte_1 AS (SELECT * FROM on_delete_fkey_table ORDER BY 1,2 DESC LIMIT 10)
	SELECT count(*) FROM cte_1;

-- query with intermediate results on remote nodes
WITH cte_1 AS (SELECT * FROM on_delete_fkey_table ORDER BY 1,2 DESC LIMIT 10)
	SELECT count(*) FROM cte_1 JOIN on_delete_fkey_table USING(value_1);

-- repartition joins
SET citus.enable_repartition_joins TO ON;
SELECT count(*) FROM on_delete_fkey_table o1 JOIN on_delete_fkey_table o2 USING(value_1);

-- repartition INSERT .. SELECT
INSERT INTO on_delete_fkey_table (id, value_1) SELECT value_1, id FROM on_delete_fkey_table ON CONFLICT DO NOTHING;
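
-- (the SELECT above intentionally swaps id and value_1, so the source rows are
-- not colocated with their target shards and Citus has to repartition the
-- result before inserting)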

-- make sure that we can create a type and use it in the same transaction
BEGIN;
	CREATE TYPE test_type AS (a int, b int);
	CREATE TABLE composite_key (id int PRIMARY KEY, c int, data test_type);
	SELECT create_distributed_table('composite_key', 'id', colocate_with:='partitioned_table');
COMMIT;

-- index statistics should work fine
CREATE INDEX test_index_on_parent ON users_table_part((value_3+value_2));
ALTER INDEX test_index_on_parent ALTER COLUMN 1 SET STATISTICS 4646;
DROP INDEX test_index_on_parent;

ALTER TABLE composite_key ALTER COLUMN c TYPE float USING (c::float + 0.5);

-- make sure that the rebalancer works fine with a regular user on MX;
-- first make sure that we can roll back
BEGIN;
	SELECT citus_move_shard_placement(1560000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'block_writes');
ROLLBACK;

SELECT citus_move_shard_placement(1560000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'block_writes');

-- connect to the worker to see if the table has the correct owner and placement metadata
\c - postgres - :worker_2_port
SELECT
	1560000, groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port AND nodename = 'localhost' AND isactive)
FROM
	pg_dist_placement
WHERE
	shardid = 1560000;

-- also make sure that pg_dist_shard_placement is updated correctly
SELECT
	nodeport = :worker_2_port
FROM pg_dist_shard_placement WHERE shardid = 1560000;

\c - postgres - :worker_1_port
SELECT
	1560000, groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port AND nodename = 'localhost' AND isactive)
FROM
	pg_dist_placement
WHERE
	shardid = 1560000;

-- also make sure that pg_dist_shard_placement is updated correctly
SELECT
	nodeport = :worker_2_port
FROM pg_dist_shard_placement WHERE shardid = 1560000;

-- now connect with the user to the coordinator again
\c - regular_mx_user - :master_port
SET search_path TO "Mx Regular User";

-- make sure that we can still execute queries
SELECT count(*) FROM local_table_in_the_metadata JOIN reference_table USING(id) JOIN on_delete_fkey_table USING(id);

-- now, call the rebalancer directly, which should also work fine
SELECT rebalance_table_shards(shard_transfer_mode:='block_writes');
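
-- optional: after rebalancing, the placements should again be spread across
-- both workers
SELECT nodeport, count(*) FROM pg_dist_shard_placement GROUP BY nodeport ORDER BY nodeport;
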
-- make sure that we can still execute queries
SELECT count(*) FROM local_table_in_the_metadata JOIN reference_table USING(id) JOIN on_delete_fkey_table USING(id);

-- let's run some queries from the workers
\c - regular_mx_user - :worker_2_port
SET search_path TO "Mx Regular User";
SELECT count(*) FROM local_table_in_the_metadata JOIN reference_table USING(id) JOIN on_delete_fkey_table USING(id);
BEGIN;
	TRUNCATE reference_table CASCADE;
ROLLBACK;

-- join involving local, reference, and distributed tables
SELECT count(*) FROM local_table_in_the_metadata JOIN reference_table USING(id) JOIN on_delete_fkey_table USING(id);

-- query with intermediate results
WITH cte_1 AS (SELECT * FROM on_delete_fkey_table ORDER BY 1,2 DESC LIMIT 10)
	SELECT count(*) FROM cte_1;

-- query with intermediate results on remote nodes
WITH cte_1 AS (SELECT * FROM on_delete_fkey_table ORDER BY 1,2 DESC LIMIT 10)
	SELECT count(*) FROM cte_1 JOIN on_delete_fkey_table USING(value_1);

-- repartition joins
SET citus.enable_repartition_joins TO ON;
SELECT count(*) FROM on_delete_fkey_table o1 JOIN on_delete_fkey_table o2 USING(value_1);

BEGIN;
	SET LOCAL citus.force_max_query_parallelization TO ON;
	DELETE FROM on_delete_fkey_table;
	WITH cte_1 AS (SELECT * FROM on_delete_fkey_table ORDER BY 1,2 DESC LIMIT 10)
	SELECT count(*) FROM cte_1;
COMMIT;

\c - postgres - :master_port

-- resync the metadata to both nodes for test purposes and then stop
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);

SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);

DROP SCHEMA "Mx Regular User" CASCADE;