This commit is contained in:
root
2024-04-24 10:25:44 +08:00
parent 627bf43ee3
commit 248388a322
5271 changed files with 3753425 additions and 803 deletions

View File

@@ -0,0 +1,31 @@
USE test;
-- This file contains queries that can be used to check table consistency after running the stress_tx_rr test suite.
-- Contains a mix of queries used throughout the actual test suite.
-- When the server is running, run this script e.g. like this:
-- ./bin/mysql -v -u root -h 127.0.0.1 < mysql-test/suite/engines/rr_trx/check_consistency.sql
CHECK TABLE t1;
ANALYZE TABLE t1;
-- Sum of all integers that are part of the test data should be 0
SELECT SUM(`int1` + `int1_key` + `int1_unique`
+ `int2` + `int2_key` + `int2_unique`)
AS TotalSum
FROM t1;
-- No uncommitted data should be visible to a REPEATABLE-READ transaction
SELECT * FROM t1 WHERE `is_uncommitted` = 1;
-- No rows marked as consistent should have row-sum not equal to 0
SELECT * FROM t1 WHERE @sum:=`int1` + `int1_key` + `int1_unique` + `int2` + `int2_key` + `int2_unique` <> 0 AND `is_consistent` = 1;
-- Check the table count. SHOULD NOT BE 0.
SELECT COUNT(*) FROM t1;
-- The count of rows with pk divisible by 5 should be constant.
-- (less useful when there is no concurrency, though)
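-- (With the initial 1000-row data set this count should be 200, since rows with
--  `pk` MOD 5 = 0 are never deleted; see also the rr_c_count_not_zero test.)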
SELECT COUNT(*) FROM t1 WHERE `pk` MOD 5 = 0 AND `pk` BETWEEN 1 AND 1000;
-- Check statistics (any number is OK; we are only looking for an unreasonably high number of errors).
SELECT * FROM statistics;

View File

@@ -0,0 +1,75 @@
################################################################################
#
# Checks if an error has occurred and if so rolls back the entire transaction.
# Only source this file when such behavior is needed.
#
# Since this file needs to be sourced _after_ the statement that we want to check
# for error, any unacceptable errors will have already caused the test to fail.
# If we get this far, we know that the error was a valid one.
#
# Typical usage in testcase:
# -------------------------------------------------------------------
# --error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT
# UPDATE t1 SET `int1` = `int1` - 4 WHERE `pk` < 25 LIMIT 1;
# --source suite/stress_tx_rr/include/check_for_error_rollback.inc
# -------------------------------------------------------------------
#
# Examples of "valid" error types in transactional testing:
# 1205 - ER_LOCK_WAIT_TIMEOUT
# 1213 - ER_LOCK_DEADLOCK
# 1020 - ER_CHECKREAD (Falcon: "Record has changed since last read")
#
# In some situations duplicate key errors etc. are also valid.
#
# We keep an approximate count of the number of errors / rollbacks.
# We don't distinguish between error types, as this would require extra queries,
# reducing concurrency.
#
# We do an explicit rollback to make sure all engines have identical behavior on
# transactional errors (some engines only roll back the last statement in some
# cases).
# We don't show this in the result file because we don't know when it will
# occur and we don't want diffs because of legitimate ROLLBACKs. If, however,
# we want to go back and trace ROLLBACKS of this kind, then we need another
# solution.
#
# NOTE: Use check_for_error_rollback_skip.inc instead if subsequent statements
# depend on the statements executed before calling this script, and handling
# varying test output gets too complicated.
#
################################################################################
--disable_query_log
# (Re-) set the error variable in case it has been set to a different value previously.
# This value may be read by the wrapping test script to check if there really
# was an error or not.
let $error= 0;
if ($mysql_errno)
{
# Last statement sent to the server resulted in an error (0 means no error).
# Set error variable, because this is used by wrapping tests to determine whether or not
# to continue with other statements in the same transaction. If set, this indicates that
# the last statement executed before calling this script resulted in an error.
let $error= $mysql_errno;
## Old code for determining error type...
#let $deadlock= `SELECT IF($mysql_errno = 1213, 1, 0)`;
#let $timeout= `SELECT IF($mysql_errno = 1205, 1, 0)`;
#if ($deadlock) { ... } (etc.)
# Do a full rollback of the current transaction.
ROLLBACK;
# update statistics
# TODO: Only do this every n times (e.g. n = 10 or 100) to reduce contention.
# An idea is to use some MOD expression to determine this (e.g. mod of timestamp or conn_id).
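# A hypothetical sketch of such throttling (not enabled; assumes connection ids
# are roughly uniformly distributed): only update from connections in one
# residue class, and add 10 instead of 1 to keep the count approximately right:
# if (`SELECT CONNECTION_ID() MOD 10 = 0`)
# {
#   UPDATE statistics SET tx_errors = tx_errors + 10;
# }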
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE statistics SET tx_errors = tx_errors + 1;
}
--enable_query_log

View File

@@ -0,0 +1,76 @@
################################################################################
#
# Checks if an error has occurred and if so rolls back the entire transaction.
# Only source this file when such behavior is needed.
#
# Since this file needs to be sourced _after_ the statement that we want to check
# for error, any unacceptable errors will have already caused the test to fail.
# If we get this far, we know that the error was a valid one.
#
# Typical usage in testcase:
# -------------------------------------------------------------------
# --error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT
# UPDATE t1 SET `int1` = `int1` - 4 WHERE `pk` < 25 LIMIT 1;
# --source suite/stress_tx_rr/include/check_for_error_rollback.inc
# -------------------------------------------------------------------
#
# Examples of "valid" error types in transactional testing:
# 1205 - ER_LOCK_WAIT_TIMEOUT
# 1213 - ER_LOCK_DEADLOCK
# 1020 - ER_CHECKREAD (Falcon: "Record has changed since last read")
#
# In some situations duplicate key errors etc. are also valid.
#
# We keep an approximate count of the number of errors / rollbacks.
# We don't distinguish between error types, as this would require extra queries,
# reducing concurrency.
#
# We do an explicit rollback to make sure all engines have identical behavior on
# transactional errors (some engines only roll back the last statement in some
# cases).
# We don't show this in the result file because we don't know when it will
# occur and we don't want diffs because of legitimate ROLLBACKs. If, however,
# we want to go back and trace ROLLBACKS of this kind, then we need another
# solution.
#
# At this time we skip the rest of the test to avoid result file diff problems
# in error situations vs. non-error situations in later parts of the test,
# e.g. repeatable read checking (requires some output to be useful).
#
################################################################################
--disable_query_log
# (Re-) set the error variable in case it has been set to a different value previously.
# This value may be read by the wrapping test script to check if there really
# was an error or not.
let $error= 0;
if ($mysql_errno)
{
# Last statement sent to the server resulted in an error (0 means no error).
# Set error variable, because this is used by wrapping tests to determine whether or not
# to continue with other statements in the same transaction. If set, this indicates that
# the last statement executed before calling this script resulted in an error.
let $error= $mysql_errno;
## Old code for determining error type...
#let $deadlock= `SELECT IF($mysql_errno = 1213, 1, 0)`;
#let $timeout= `SELECT IF($mysql_errno = 1205, 1, 0)`;
#if ($deadlock) { ... } (etc.)
# Do a full rollback of the current transaction.
ROLLBACK;
# update statistics
# TODO: Only do this every n times (e.g. n = 10 or 100) to reduce contention.
# An idea is to use some MOD expression to determine this (e.g. mod of timestamp or conn_id).
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE statistics SET tx_errors = tx_errors + 1;
--skip Skip rest of the test due to transactional error (deadlock, timeout, etc.)
}
--enable_query_log

View File

@@ -0,0 +1,225 @@
################################################################################
#
# Verifies that queries in a REPEATABLE READ transaction are indeed repeatable.
# Wrong results are shown as a result set based on one or more specially crafted
# queries. Normally these result sets should be empty.
#
# We want to verify that reads (SELECTs) are indeed repeatable during a
# REPEATABLE READ transaction.
#
# Generally, queries which should yield the same results at one moment in time
# should also yield the same results later in the same transaction. In some
# configurations, however, phantom reads are allowed (may e.g. depend on
# settings such as falcon_consistent_read).
#
# The check will fail if rows are changed or missing when comparing later
# queries to earlier ones.
# TODO: Phantom reads.
# Note: There is a separate test looking for `is_uncommitted` = 1.
#
# Assumptions:
# - we are in a REPEATABLE READ transaction with autocommit OFF.
# - queries include all columns of table (t1) (we SELECT columns by name)
#
# Requires/using the following variables:
# $query_count - the number of queries to compare.
# Will also be used to deduce the name of the temp table in
# which the query results should be stored (see
# record_query_all_columns.inc).
#
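# A hypothetical usage sketch (query text and include paths are examples only;
# record_query_all_columns.inc increments $query_count for us):
#   let $query_count= 0;
#   let $query= SELECT * FROM t1;
#   --source suite/stress_tx_rr/include/record_query_all_columns.inc
#   SELECT SLEEP(1);
#   let $query= SELECT * FROM t1;
#   --source suite/stress_tx_rr/include/record_query_all_columns.inc
#   --source suite/stress_tx_rr/include/check_repeatable_read.inc
#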
################################################################################
# Show results of the next queries. Empty results are OK. Non-empty results mean failure.
--enable_result_log
# The mysqltest language is unfortunately not very flexible, but we try our
# best to compare query results this way:
# - For each query, compare with previous query
# - this requires that at least 2 queries have been stored
# - Number of queries should be stored as $query_count
# - Results should be stored in temp tables with names ending with the query
# number, and with prefix "tmp".
# - E.g. compare "tmp2" with "tmp1", "tmp3" with "tmp2", "tmp4" with "tmp3" etc.
# - Fail the test once we detect changed or missing or invalid extra rows in
# latter query.
# - Caveat: if one of the queries deadlocked or timed out, we may not have
#   enough result sets to compare, so output would vary depending on this.
#   Still, we need the output from these checks to see which rows are missing
#   or changed. So, if we don't have enough queries we fake "correct output"
#   to keep mysqltest happy.
#
# Unfortunately, we need to utilize SQL and spend client-server roundtrips
# in order to do some computations that the mysqltest language does not handle.
# We try to use mysqltest variables instead where possible, as this should be
# less expensive in terms of CPU usage and time spent.
#
# First, check that we have at least two query results stored.
# We need at least 2 to be able to compare.
# Some results may not have been stored due to locking errors (see record_query_all_columns.inc), so
# we cannot assume that we always have at least 2 query results stored.
# If less than 2 query results are stored, return to calling test/script.
#
if (`SELECT IF($query_count > 1, 1, 0)`)
{
--echo ***************************************************************************
--echo * Checking REPEATABLE READ by comparing result sets from same transaction
--echo ***************************************************************************
--echo *** Query log disabled. See include files used by test for query details.
--disable_query_log
let $queryA= 1;
let $queryB= 2;
let $more_queries= $query_count;
# We start out by comparing the first 2 queries, so the while loop should run
# $query_count - 1 times. (If we have 3 queries, compare 1 with 2, then 2 with 3.)
--dec $more_queries
while ($more_queries)
{
# We still have one or more queries that have not been compared to the
# previous query.
# Compare queryB ("current query") with queryA ("previous query")
#--source suite/stress_tx_rr/include/compare_queries_with_pk.inc
let $tableA= tmp$queryA;
let $tableB= tmp$queryB;
--echo *** Comparing query $queryA (A) with query $queryB (B):
#
# In the following queries, 'SELECT * ...' could have been used instead of
# 'SELECT tmp1.pk AS ...' etc., but the latter makes it easier to compare the first
# result set to the second in test/diff output.
###########################
# Detect extra rows:
# Allow phantoms in some configurations:
# - InnoDB default settings
# - Falcon's falcon_consistent_read=0 (non-default setting)
# (TODO: What about PBXT?)
#
###########################
# TODO: Execute a query against tmp1 and tmp2 which selects new rows (rows
# present in tmp2 that are not present in tmp1) that are of the uncommitted
# variety (field `is_uncommitted` = 1).
# E.g. something like:
# SELECT ...
# FROM tmp2 LEFT JOIN tmp1
# ON tmp1.`pk` = tmp2.`pk`
# WHERE tmp1.`int1` IS NULL
# OR tmp1.`int1_key` IS NULL
# OR tmp1.`int1_unique` IS NULL
# OR tmp1.`int2` IS NULL
# OR tmp1.`int2_key` IS NULL
# OR tmp1.`int2_unique` IS NULL
# AND tmp2.`is_uncommitted` = 1;
--echo ###########################
--echo # Detect missing rows:
--echo ###########################
eval SELECT $tableA.pk AS 'A.pk',
$tableB.pk AS 'B.pk',
$tableA.id AS 'A.id',
$tableB.id AS 'B.id',
$tableA.`int1` AS 'A.int1',
$tableB.`int1` AS 'B.int1',
$tableA.`int1_key` AS 'A.int1_key',
$tableB.`int1_key` AS 'B.int1_key',
$tableA.`int1_unique` AS 'A.int1_unique',
$tableB.`int1_unique` AS 'B.int1_unique',
$tableA.`int2` AS 'A.int2',
$tableB.`int2` AS 'B.int2',
$tableA.`int2_key` AS 'A.int2_key',
$tableB.`int2_key` AS 'B.int2_key',
$tableA.`int2_unique` AS 'A.int2_unique',
$tableB.`int2_unique` AS 'B.int2_unique',
$tableA.`for_update` AS 'A.for_update',
$tableB.`for_update` AS 'B.for_update',
$tableA.timestamp AS 'A.timestamp',
$tableB.timestamp AS 'B.timestamp',
$tableA.`connection_id` AS 'A.connection_id',
$tableB.`connection_id` AS 'B.connection_id',
$tableA.`thread_id` AS 'A.thread_id',
$tableB.`thread_id` AS 'B.thread_id',
$tableA.`is_uncommitted` AS 'A.is_uncommitted',
$tableB.`is_uncommitted` AS 'B.is_uncommitted',
$tableA.`is_consistent` AS 'A.is_consistent',
$tableB.`is_consistent` AS 'B.is_consistent'
FROM $tableA LEFT JOIN $tableB
ON $tableA.`pk` = $tableB.`pk`
WHERE $tableB.`pk` IS NULL;
#
# OR $tableB.`int1_key` IS NULL
# OR $tableB.`int1_unique` IS NULL
# OR $tableB.`int2` IS NULL
# OR $tableB.`int2_key` IS NULL
# OR $tableB.`int2_unique` IS NULL;
--echo
--echo ###########################
--echo # Detect changed rows:
--echo ###########################
eval SELECT $tableA.pk AS 'A.pk',
$tableB.pk AS 'B.pk',
$tableA.id AS 'A.id',
$tableB.id AS 'B.id',
$tableA.`int1` AS 'A.int1',
$tableB.`int1` AS 'B.int1',
$tableA.`int1_key` AS 'A.int1_key',
$tableB.`int1_key` AS 'B.int1_key',
$tableA.`int1_unique` AS 'A.int1_unique',
$tableB.`int1_unique` AS 'B.int1_unique',
$tableA.`int2` AS 'A.int2',
$tableB.`int2` AS 'B.int2',
$tableA.`int2_key` AS 'A.int2_key',
$tableB.`int2_key` AS 'B.int2_key',
$tableA.`int2_unique` AS 'A.int2_unique',
$tableB.`int2_unique` AS 'B.int2_unique',
$tableA.`for_update` AS 'A.for_update',
$tableB.`for_update` AS 'B.for_update',
$tableA.timestamp AS 'A.timestamp',
$tableB.timestamp AS 'B.timestamp',
$tableA.`connection_id` AS 'A.connection_id',
$tableB.`connection_id` AS 'B.connection_id',
$tableA.`thread_id` AS 'A.thread_id',
$tableB.`thread_id` AS 'B.thread_id',
$tableA.`is_uncommitted` AS 'A.is_uncommitted',
$tableB.`is_uncommitted` AS 'B.is_uncommitted',
$tableA.`is_consistent` AS 'A.is_consistent',
$tableB.`is_consistent` AS 'B.is_consistent'
FROM $tableB INNER JOIN $tableA
ON $tableB.`pk` = $tableA.`pk`
WHERE $tableB.`int1` <> $tableA.`int1`
OR $tableB.`int1_key` <> $tableA.`int1_key`
OR $tableB.`int1_unique` <> $tableA.`int1_unique`
OR $tableB.`int2` <> $tableA.`int2`
OR $tableB.`int2_key` <> $tableA.`int2_key`
OR $tableB.`int2_unique` <> $tableA.`int2_unique`;
--dec $more_queries
--inc $queryA
--inc $queryB
}
--enable_query_log
}
## Cleanup is skipped because temporary tables and prepared statements will
## be cleaned up automatically by the server when this session ends, and we
## want to have as few client-server roundtrips as possible (thus avoid
## unnecessary SQL statement executions).

View File

@@ -0,0 +1,57 @@
################################################################################
#
# Stores results from a given query in a temporary table.
#
# This table can then be used later in the same session for comparing results in
# various stages of a transaction (see check_repeatable_read.inc).
#
# The table name will be: tmp$query_count
# where $query_count is the value of the counter for the number of stored
# queries in this session. Example: "tmp1"
#
# We increment the counter (session scope) for the number of queries so that
# checker scripts may
# a) know how many queries to compare
# b) determine the name of the temp tables storing each query
#
# Assumptions:
# - we may be in the middle of a transaction with autocommit OFF.
# - queries include all columns of table (t1). This is because we want to
# successfully add indexes to columns such as `pk`, `int1_key`, etc.
#
# Requires the following variables to be set:
# $query - the query to be run, whose results will be stored in a temp table.
#
# Modifies the following variables:
# $query_count - the number of queries processed by this script so far in this
# session.
# $tmptable - helper variable containing the name of the temp table.
#
# The pattern is "CREATE TEMPORARY TABLE tmpx SELECT ...". This allows us to
# store query results by using SQL without causing implicit commits.
#
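# A hypothetical caller (query text is an example only):
#   let $query= SELECT * FROM t1 WHERE `pk` > 1000 ORDER BY `int1_key`;
#   --source suite/stress_tx_rr/include/record_query_all_columns.inc
#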
################################################################################
# increment the query counter
--inc $query_count
let $tmptable= tmp$query_count;
# Execute the query and store results in a new temp table.
# Creating indexes now because we cannot do that later without causing an implicit commit.
# Therefore we assume that columns with these names exist in the result set produced by the queries.
--echo *** Disabling query log (we may deadlock and not do this after all)
--disable_query_log
# Also disable warnings, because we get 'Unsafe to binlog' warnings for this with 'newer' server versions.
--disable_warnings
--echo *** Creating temp table with results from query '$query' unless we deadlock or time out.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT
--eval CREATE TEMPORARY TABLE $tmptable (PRIMARY KEY (`pk`), KEY (`int1_key`), KEY (`int2_key`), UNIQUE (`int1_unique`), UNIQUE (`int2_unique`)) $query
--enable_warnings
# We may not have been able to create temp table due to locking constraints.
# In that case, roll back the statement and skip the rest of the test.
--source suite/stress_tx_rr/include/check_for_error_rollback_skip.inc
--echo *** Enabling query log
--enable_query_log

View File

@@ -0,0 +1,115 @@
#
# Specify storage engine to use. Variable $engine is set in wrapper test.
#
eval SET @@storage_engine = $engine;
if (`SELECT @@storage_engine LIKE 'InnoDB' AND @@version LIKE '%6.%'`)
{
# Need this due to Bug#43447 - Crash when executing SELECT ... LIMIT n FOR UPDATE query
# Hopefully temporary...
# Not applicable to 5.1 server or earlier.
--disable_query_log
SET GLOBAL optimizer_use_mrr='disable';
SET GLOBAL engine_condition_pushdown=off;
--enable_query_log
}
if (`SELECT @@storage_engine LIKE 'PBXT' AND @@version LIKE '%5.1%'`)
{
--disable_query_log
SET SESSION binlog_format = 'MIXED';
SET GLOBAL binlog_format = 'MIXED';
--enable_query_log
}
# Verify default storage engine.
SHOW VARIABLES LIKE 'storage_engine';
# Verify default isolation level
SHOW VARIABLES LIKE 'tx_isolation';
#
# Create table for keeping track of test metadata/statistics (counters etc.).
# (Need a data structure that will persist across tests, clients, sessions).
# Expand/modify if needed, but take care of the test files that use it.
#
# Columns:
# tx_errors - keeps track of the total number of transactional errors (deadlocks, timeouts, etc.) so far.
#
# Using default storage engine (see above).
CREATE TABLE statistics (
tx_errors INTEGER NOT NULL
);
# Initialize statistics table.
INSERT INTO statistics (tx_errors) VALUES (0);
#
# Create main test / data table. Some notes:
# * timestamp is automatically DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
# * BOOLEAN is TINYINT(1)
# * `is_consistent` means that the sum of int1* and int2* columns in that row
# is = 0. NOTE: Do not change meaning unless you take care to
# change test cases that rely on it (e.g. rr_id_900).
# Set `is_consistent` = 0 if changing a row's sum to non-zero.
#
# TODO: Get TID (thread_id) from mysql-stress-test.pl somehow.
#
CREATE TABLE t1 (
`pk` INTEGER AUTO_INCREMENT NOT NULL,
`id` INTEGER NOT NULL,
`int1` INTEGER,
`int1_key` INTEGER,
`int1_unique` INTEGER,
`int2` INTEGER,
`int2_key` INTEGER,
`int2_unique` INTEGER,
`for_update` BOOLEAN DEFAULT 0,
`timestamp` TIMESTAMP,
`connection_id` INTEGER,
`thread_id` INTEGER DEFAULT 0,
`is_uncommitted` BOOLEAN DEFAULT 0,
`is_consistent` BOOLEAN DEFAULT 0,
KEY (`id`),
KEY (`int1_key`),
KEY (`int2_key`),
UNIQUE (`int1_unique`),
UNIQUE (`int2_unique`),
PRIMARY KEY (`pk`)
);
# Check that the table was really created with the intended storage engine.
SHOW CREATE TABLE t1;
## Procedure for inserting the value 1000 into the integer fields, "rows" times.
--delimiter //
eval CREATE PROCEDURE insertRows(rows INT)
BEGIN
SET @n = 1;
REPEAT
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`,
`is_uncommitted`, `is_consistent`)
VALUES (0, 1000, 1000, @n,
-1000, -1000, -@n,
0, CONNECTION_ID(), 0,
0, 1);
SET @n = @n + 1;
UNTIL @n > rows
END REPEAT;
END;
//
--delimiter ;
## Insert 1000 rows.
CALL insertRows(1000);
## Check the sum of all int columns
SELECT SUM(`int1` + `int1_key` + `int1_unique`
+ `int2` + `int2_key` + `int2_unique`)
AS TotalSum
FROM t1;

View File

@@ -0,0 +1 @@
init_innodb

View File

@@ -0,0 +1,81 @@
SET @@storage_engine = 'InnoDB';
SHOW VARIABLES LIKE 'storage_engine';
Variable_name Value
storage_engine InnoDB
SHOW VARIABLES LIKE 'tx_isolation';
Variable_name Value
tx_isolation REPEATABLE-READ
CREATE TABLE statistics (
tx_errors INTEGER NOT NULL
);
INSERT INTO statistics (tx_errors) VALUES (0);
CREATE TABLE t1 (
`pk` INTEGER AUTO_INCREMENT NOT NULL,
`id` INTEGER NOT NULL,
`int1` INTEGER,
`int1_key` INTEGER,
`int1_unique` INTEGER,
`int2` INTEGER,
`int2_key` INTEGER,
`int2_unique` INTEGER,
`for_update` BOOLEAN DEFAULT 0,
`timestamp` TIMESTAMP,
`connection_id` INTEGER,
`thread_id` INTEGER DEFAULT 0,
`is_uncommitted` BOOLEAN DEFAULT 0,
`is_consistent` BOOLEAN DEFAULT 0,
KEY (`id`),
KEY (`int1_key`),
KEY (`int2_key`),
UNIQUE (`int1_unique`),
UNIQUE (`int2_unique`),
PRIMARY KEY (`pk`)
);
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`pk` int(11) NOT NULL AUTO_INCREMENT,
`id` int(11) NOT NULL,
`int1` int(11) DEFAULT NULL,
`int1_key` int(11) DEFAULT NULL,
`int1_unique` int(11) DEFAULT NULL,
`int2` int(11) DEFAULT NULL,
`int2_key` int(11) DEFAULT NULL,
`int2_unique` int(11) DEFAULT NULL,
`for_update` tinyint(1) DEFAULT '0',
`timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`connection_id` int(11) DEFAULT NULL,
`thread_id` int(11) DEFAULT '0',
`is_uncommitted` tinyint(1) DEFAULT '0',
`is_consistent` tinyint(1) DEFAULT '0',
PRIMARY KEY (`pk`),
UNIQUE KEY `int1_unique` (`int1_unique`),
UNIQUE KEY `int2_unique` (`int2_unique`),
KEY `id` (`id`),
KEY `int1_key` (`int1_key`),
KEY `int2_key` (`int2_key`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
CREATE PROCEDURE insertRows(rows INT)
BEGIN
SET @n = 1;
REPEAT
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`,
`is_uncommitted`, `is_consistent`)
VALUES (0, 1000, 1000, @n,
-1000, -1000, -@n,
0, CONNECTION_ID(), 0,
0, 1);
SET @n = @n + 1;
UNTIL @n > rows
END REPEAT;
END;
//
CALL insertRows(1000);
SELECT SUM(`int1` + `int1_key` + `int1_unique`
+ `int2` + `int2_key` + `int2_unique`)
AS TotalSum
FROM t1;
TotalSum
0

View File

@@ -0,0 +1,10 @@
SET autocommit = 0;
START TRANSACTION;
*** Running query: SELECT COUNT(*) FROM t1
SELECT COUNT(*) FROM t1 WHERE `pk` MOD 5 = 0 AND `pk` BETWEEN 1 AND 1000;
COUNT(*)
200
SELECT COUNT(*) FROM t1 WHERE `pk` MOD 5 = 0 AND `pk` BETWEEN 1 AND 1000;
COUNT(*)
200
COMMIT;

View File

@@ -0,0 +1,3 @@
SELECT * FROM statistics;
tx_errors
0

View File

@@ -0,0 +1,58 @@
SET autocommit = 0;
START TRANSACTION;
*** multi-statement insert, inserting first positive then negative number:
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (40, 40, 40, CONNECTION_ID(),
-40, -40, -CONNECTION_ID(),
0, CONNECTION_ID(), 0, 0, 1);
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (-40, -40, -40, -CONNECTION_ID(),
40, 40, CONNECTION_ID(),
0, CONNECTION_ID(), 0, 0, 1);
COMMIT;
START TRANSACTION;
*** insert multiple rows using a single statement:
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (41, 41, 41, CONNECTION_ID()+1,
-41, -41, -(CONNECTION_ID()+1),
0, CONNECTION_ID(), 0, 0, 1),
(41, 41, 41, CONNECTION_ID()+2,
41, 41, CONNECTION_ID()+2,
0, CONNECTION_ID(), 0, 0, 0),
(41, -41, -41, -(CONNECTION_ID()+2),
-41, -41, -(CONNECTION_ID()+2),
0, CONNECTION_ID(), 0, 0, 0);
COMMIT;
START TRANSACTION;
*** INSERT IGNORE using both known duplicate values and non-duplicates:
INSERT IGNORE INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (42, 42, 42, CONNECTION_ID()+3,
-42, -42, -(CONNECTION_ID()+3),
0, CONNECTION_ID(), 0, 0, 1);
INSERT IGNORE INTO t1 (`pk`, `id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (5, 43, 42, 42, CONNECTION_ID(),
-42, -42, CONNECTION_ID(),
0, CONNECTION_ID(), 0, 0, 0);
INSERT IGNORE INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (44, 42, 42, (CONNECTION_ID() + 1000) MOD 5000,
-42, -42, -((CONNECTION_ID() + 1000) MOD 5000),
0, CONNECTION_ID(), 0, 0, 1);
COMMIT;

View File

@@ -0,0 +1,5 @@
SET autocommit = 0;
START TRANSACTION;
INSERT INTO t1 (`id`, `int1`, `connection_id`, `is_uncommitted`)
VALUES (3, 3, CONNECTION_ID(), 1);
COMMIT;

View File

@@ -0,0 +1,33 @@
SET autocommit = 0;
START TRANSACTION;
*** Delete a row and re-insert with same `pk`:
*** Disabling result log
SELECT @pk:=`pk`,
@unique1:=`int1_unique`,
@unique2:=`int2_unique`
FROM t1 WHERE `pk` MOD 5 = 4 AND `pk` > 900 AND `is_consistent` = 1 LIMIT 1 FOR UPDATE;
*** Enabling result log
DELETE FROM t1 WHERE `pk` = @pk;
*** Doing insert of row with pk = @pk if above statement succeeded (query log disabled)...
COMMIT;
START TRANSACTION;
*** Delete a row and re-insert with `pk` = NULL:
*** Disabling result log
SELECT @pk:=`pk`,
@unique1:=`int1_unique`,
@unique2:=`int2_unique`
FROM t1 WHERE `pk` MOD 5 = 4 AND `pk` > 901 AND `is_consistent` = 1 LIMIT 1 FOR UPDATE;
*** Enabling result log
DELETE FROM t1 WHERE `pk` = @pk;
*** Doing insert of row with pk = NULL if above statement succeeded (query log disabled)...
COMMIT;
START TRANSACTION;
*** Delete up to two (2) "consistent" rows (zero-sum; table sum unchanged)
DELETE FROM t1 WHERE `pk` > 902 AND `pk` MOD 5 = 3 AND `is_consistent` = 1 LIMIT 2;
COMMIT;

View File

@@ -0,0 +1,13 @@
SET autocommit = 0;
START TRANSACTION;
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `is_uncommitted`, `is_consistent`)
SELECT src.`id`, src.`int1`, src.`int1_key`, src.`int1_unique`,
src.`int2`, src.`int2_key`, src.`int2_unique`,
src.`for_update`, src.`is_uncommitted`, src.`is_consistent`
FROM t1 AS src
WHERE (src.`pk` BETWEEN 1000 AND 1049) AND (src.`id` > 0) AND (src.`is_consistent` = 1) AND (src.`int1_unique` MOD 8 = 0)
ON DUPLICATE KEY UPDATE `int1_unique`= src.`int1_unique` + CONNECTION_ID() + 1000, `int2_unique`= src.`int2_unique` - (CONNECTION_ID()+1000);
*** Updating id and connection_id if we actually inserted something (query log disabled)...
COMMIT;

View File

@@ -0,0 +1,13 @@
SET autocommit = 0;
START TRANSACTION;
*** All changes done in this test / transaction will be rolled back.
*** Disabling query log.
*** Executing UPDATE on rows 501 -- 549 with id = 50, single statement.
*** Executing UPDATE on rows 526 -- 549 with id = 50, single statement.
*** Executing UPDATE on rows 501 -- 525 with id = 50, multiple statements.
*** ROLLBACK
*** START TRANSACTION
*** Executing INSERTs of rows with id = 50, 2 statements.
*** Executing DELETE of rows with pk between 449 and 540, single statement.
*** Enabling query log.
ROLLBACK;

View File

@@ -0,0 +1,34 @@
SET autocommit = 0;
START TRANSACTION;
*** Disabling result log
SELECT @pk:=`pk`, @unique:=`int1_unique`
FROM t1
WHERE `pk` MOD 5 <> 0
AND `pk` > 200 + (CONNECTION_ID() MOD 1000)
AND `int1_unique` NOT IN (SELECT `int1_unique` FROM t1 WHERE (`pk` < 1000 AND `pk` MOD 5 = 0) OR `is_consistent` = 0)
AND -`int1_unique` NOT IN (SELECT `int2_unique` FROM t1 WHERE (`pk` < 1000 AND `pk` MOD 5 = 0) OR `is_consistent` = 0)
AND `is_consistent`= 1
LIMIT 1 FOR UPDATE;
*** Enabling result log
REPLACE INTO t1 SET `pk` = @pk,
`id` = 7,
`int1` = 7,
`int1_key` = -7,
`int1_unique` = @unique,
`int2` = -7,
`int2_key` = 7,
`int2_unique` = -@unique,
`connection_id` = CONNECTION_ID(),
`is_consistent` = 1;
COMMIT;
START TRANSACTION;
REPLACE INTO t1
SELECT * FROM t1
WHERE `pk` > 1000 + CONNECTION_ID() MOD 777
AND `int1_unique` NOT IN (SELECT `int1_unique` FROM t1 WHERE `pk` < 1000 OR `is_consistent` = 0)
AND `int2_unique` NOT IN (SELECT `int2_unique` FROM t1 WHERE `pk` < 1000 OR `is_consistent` = 0)
AND `pk` MOD 5 <> 0
AND `is_consistent` = 1
ORDER BY `pk` LIMIT 1;
*** Updating replaced row (if applicable)
COMMIT;

View File

@@ -0,0 +1,5 @@
SET autocommit = 0;
START TRANSACTION;
SELECT * FROM t1 WHERE `is_uncommitted` > 0;
pk id int1 int1_key int1_unique int2 int2_key int2_unique for_update timestamp connection_id thread_id is_uncommitted is_consistent
COMMIT;

View File

@@ -0,0 +1,72 @@
SET autocommit = 0;
START TRANSACTION;
Comparing results from 2 queries (unless we deadlock or some such)...
*** Query 1: SELECT * FROM t1 WHERE `pk` > 1000 ORDER BY `int1_key`
*** Disabling query log (we may deadlock and not do this after all)
*** Creating temp table with results from query 'SELECT * FROM t1 WHERE `pk` > 1000 ORDER BY `int1_key`' unless we deadlock or time out.
*** Enabling query log
*** Filler:
SELECT SLEEP(1);
SLEEP(1)
0
*** Query 2: SELECT * FROM t1 WHERE `pk` > 1000 ORDER BY `int1_key` LIMIT 987654321
*** Disabling query log (we may deadlock and not do this after all)
*** Creating temp table with results from query 'SELECT * FROM t1 WHERE `pk` > 1000 ORDER BY `int1_key` LIMIT 987654321' unless we deadlock or time out.
*** Enabling query log
*** Filler: Do something other than sleep while waiting for other transactions to do stuff...
CREATE TEMPORARY TABLE tmpSelectLimitNoLimit (a INT, b VARCHAR(255), c TIMESTAMP, KEY(a));
INSERT INTO tmpSelectLimitNoLimit VALUES
(-1, 'This is a filler', NOW()),
(0, 'More stuff', NOW()),
(999999999, 'Even more bogus data', NOW()),
(-98765, 'Even more bogus data', NOW());
SELECT * FROM tmpSelectLimitNoLimit WHERE a < -99999999 ORDER BY a;
a b c
*** Query 3: SELECT * FROM t1 WHERE `pk` > 1000 ORDER BY `int1_key` LIMIT 987654321
*** Disabling query log (we may deadlock and not do this after all)
*** Creating temp table with results from query 'SELECT * FROM t1 WHERE `pk` > 1000 ORDER BY `int1_key` LIMIT 987654321' unless we deadlock or time out.
*** Enabling query log
*** Filler:
UPDATE tmpSelectLimitNoLimit SET a = 3;
SELECT SLEEP(1);
SLEEP(1)
0
*** Query 4: SELECT * FROM t1 WHERE `pk` > 1000 ORDER BY `int1_key`
*** Disabling query log (we may deadlock and not do this after all)
*** Creating temp table with results from query 'SELECT * FROM t1 WHERE `pk` > 1000 ORDER BY `int1_key`' unless we deadlock or time out.
*** Enabling query log
***************************************************************************
* Checking REPEATABLE READ by comparing result sets from same transaction
***************************************************************************
*** Query log disabled. See include files used by test for query details.
*** Comparing query 1 (A) with query 2 (B):
###########################
# Detect missing rows:
###########################
A.pk B.pk A.id B.id A.int1 B.int1 A.int1_key B.int1_key A.int1_unique B.int1_unique A.int2 B.int2 A.int2_key B.int2_key A.int2_unique B.int2_unique A.for_update B.for_update A.timestamp B.timestamp A.connection_id B.connection_id A.thread_id B.thread_id A.is_uncommitted B.is_uncommitted A.is_consistent B.is_consistent
###########################
# Detect changed rows:
###########################
A.pk B.pk A.id B.id A.int1 B.int1 A.int1_key B.int1_key A.int1_unique B.int1_unique A.int2 B.int2 A.int2_key B.int2_key A.int2_unique B.int2_unique A.for_update B.for_update A.timestamp B.timestamp A.connection_id B.connection_id A.thread_id B.thread_id A.is_uncommitted B.is_uncommitted A.is_consistent B.is_consistent
*** Comparing query 2 (A) with query 3 (B):
###########################
# Detect missing rows:
###########################
A.pk B.pk A.id B.id A.int1 B.int1 A.int1_key B.int1_key A.int1_unique B.int1_unique A.int2 B.int2 A.int2_key B.int2_key A.int2_unique B.int2_unique A.for_update B.for_update A.timestamp B.timestamp A.connection_id B.connection_id A.thread_id B.thread_id A.is_uncommitted B.is_uncommitted A.is_consistent B.is_consistent
###########################
# Detect changed rows:
###########################
A.pk B.pk A.id B.id A.int1 B.int1 A.int1_key B.int1_key A.int1_unique B.int1_unique A.int2 B.int2 A.int2_key B.int2_key A.int2_unique B.int2_unique A.for_update B.for_update A.timestamp B.timestamp A.connection_id B.connection_id A.thread_id B.thread_id A.is_uncommitted B.is_uncommitted A.is_consistent B.is_consistent
*** Comparing query 3 (A) with query 4 (B):
###########################
# Detect missing rows:
###########################
A.pk B.pk A.id B.id A.int1 B.int1 A.int1_key B.int1_key A.int1_unique B.int1_unique A.int2 B.int2 A.int2_key B.int2_key A.int2_unique B.int2_unique A.for_update B.for_update A.timestamp B.timestamp A.connection_id B.connection_id A.thread_id B.thread_id A.is_uncommitted B.is_uncommitted A.is_consistent B.is_consistent
###########################
# Detect changed rows:
###########################
A.pk B.pk A.id B.id A.int1 B.int1 A.int1_key B.int1_key A.int1_unique B.int1_unique A.int2 B.int2 A.int2_key B.int2_key A.int2_unique B.int2_unique A.for_update B.for_update A.timestamp B.timestamp A.connection_id B.connection_id A.thread_id B.thread_id A.is_uncommitted B.is_uncommitted A.is_consistent B.is_consistent
COMMIT;

View File

@@ -0,0 +1,26 @@
SET autocommit = 0;
START TRANSACTION;
*** Disabling query log (we may deadlock and not do this after all)
*** Creating temp table with results from query 'SELECT * from t1' unless we deadlock or time out.
*** Enabling query log
SELECT SLEEP(1);
SLEEP(1)
0
*** Disabling query log (we may deadlock and not do this after all)
*** Creating temp table with results from query 'SELECT * from t1' unless we deadlock or time out.
*** Enabling query log
***************************************************************************
* Checking REPEATABLE READ by comparing result sets from same transaction
***************************************************************************
*** Query log disabled. See include files used by test for query details.
*** Comparing query 1 (A) with query 2 (B):
###########################
# Detect missing rows:
###########################
A.pk B.pk A.id B.id A.int1 B.int1 A.int1_key B.int1_key A.int1_unique B.int1_unique A.int2 B.int2 A.int2_key B.int2_key A.int2_unique B.int2_unique A.for_update B.for_update A.timestamp B.timestamp A.connection_id B.connection_id A.thread_id B.thread_id A.is_uncommitted B.is_uncommitted A.is_consistent B.is_consistent
###########################
# Detect changed rows:
###########################
A.pk B.pk A.id B.id A.int1 B.int1 A.int1_key B.int1_key A.int1_unique B.int1_unique A.int2 B.int2 A.int2_key B.int2_key A.int2_unique B.int2_unique A.for_update B.for_update A.timestamp B.timestamp A.connection_id B.connection_id A.thread_id B.thread_id A.is_uncommitted B.is_uncommitted A.is_consistent B.is_consistent
COMMIT;

View File

@@ -0,0 +1,6 @@
SELECT @sum:=SUM(`int1` + `int1_key` + `int1_unique`
+ `int2` + `int2_key` + `int2_unique`)
AS TotalSum
FROM t1;
TotalSum
0

View File

@@ -0,0 +1,153 @@
SET autocommit = 0;
*** Move record out of locked portion of index:
START TRANSACTION;
*** Disabling result log (result will vary)
SELECT * FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` LIMIT 10 FOR UPDATE;
SELECT @pk:=`pk` FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` LIMIT 1;
*** Enabling result log
UPDATE t1 SET `int1_key` = `int1_key` + 50,
`int2_key` = `int2_key` - 50,
`id` = 10,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record out of locked portion of UNIQUE index:
START TRANSACTION;
*** Disabling result log (result will vary)
SELECT * FROM t1 WHERE `int1_unique` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_unique` LIMIT 10 FOR UPDATE;
SELECT @pk:=`pk` FROM t1 WHERE `int1_unique` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_unique` LIMIT 1;
*** Enabling result log
UPDATE t1 SET `int1_unique` = `int1_unique` + 50 + CONNECTION_ID(),
`int2_unique` = `int2_unique` - 50 - CONNECTION_ID(),
`id` = 11,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record into locked portion of index:
START TRANSACTION;
*** Disabling result log (result will vary)
SELECT * FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` LIMIT 10 FOR UPDATE;
SELECT @pk:=`pk` FROM t1 WHERE `int1_key` > 1030 ORDER BY `int1_key`, `pk` LIMIT 1;
*** Enabling result log
UPDATE t1 SET `int1_key` = `int1_key` + 50,
`int2_key` = `int2_key` - 50,
`id` = 12,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record inside locked portion of index (move it but stay inside the locked range):
START TRANSACTION;
*** Disabling result log (result will vary)
SELECT * FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` LIMIT 10 FOR UPDATE;
SELECT @pk:=`pk` FROM t1 WHERE `int1_key` BETWEEN 981 + 10 + (CONNECTION_ID() MOD 15) AND 1019 ORDER BY `int1_key`, `pk` LIMIT 1;
*** Enabling result log
UPDATE t1 SET `int1_key` = `int1_key` - 10,
`int2_key` = `int2_key` + 10,
`id` = 13,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record outside existing index boundary (max):
START TRANSACTION;
*** Disabling result log (results will vary)
SELECT @max:=MAX(`int2_key`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
SELECT @old:=`int2_key`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
*** Enabling result log
UPDATE t1 SET `int2_key` = @max + 1,
`int2` = `int2` - (@max + 1 - @old),
`id` = 14,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record outside existing UNIQUE index boundary (max):
START TRANSACTION;
*** Disabling result log (results will vary)
SELECT @max:=MAX(`int2_unique`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
SELECT @old:=`int2_unique`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
*** Enabling result log
UPDATE t1 SET `int2_unique` = @max + 1,
`int2` = `int2` - (@max + 1 - @old),
`id` = 15,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record outside existing index boundary (min):
START TRANSACTION;
*** Disabling result log (results will vary)
SELECT @min:=MIN(`int1_key`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
SELECT @old:=`int1_key`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
*** Enabling result log
UPDATE t1 SET `int1_key` = @min - 1,
`int1` = `int1` - (@min - 1 - @old),
`id` = 16,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record outside existing UNIQUE index boundary (min):
START TRANSACTION;
*** Disabling result log (results will vary)
SELECT @min:=MIN(`int1_unique`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
SELECT @old:=`int1_unique`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
*** Enabling result log
UPDATE t1 SET `int1_unique` = @min - 1,
`int1` = `int1` - (@min - 1 - @old),
`id` = 17,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record forward in index (add some number):
START TRANSACTION;
UPDATE t1 SET `int2_key` = `int2_key` + 16,
`int2` = `int2` - 16,
`id` = 18,
`connection_id` = CONNECTION_ID(),
`thread_id` = 0
WHERE `pk` = CONNECTION_ID() MOD 1000;
*** Move record backward in index (subtract some number):
UPDATE t1 SET `int1_key` = `int1_key` - 16,
`int1` = `int1` + 16,
`id` = 18,
`connection_id` = CONNECTION_ID(),
`thread_id` = 0
WHERE `pk` = CONNECTION_ID() + 16 MOD 1000;
COMMIT;

View File

@@ -0,0 +1,153 @@
SET autocommit = 0;
*** Move record out of locked portion of index:
START TRANSACTION;
*** Disabling result log (result will vary)
SELECT * FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` FOR UPDATE;
SELECT @pk:=`pk` FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` LIMIT 1;
*** Enabling result log
UPDATE t1 SET `int1_key` = `int1_key` + 50,
`int2_key` = `int2_key` - 50,
`id` = 10,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record out of locked portion of UNIQUE index:
START TRANSACTION;
*** Disabling result log (result will vary)
SELECT * FROM t1 WHERE `int1_unique` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_unique` FOR UPDATE;
SELECT @pk:=`pk` FROM t1 WHERE `int1_unique` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_unique` LIMIT 1;
*** Enabling result log
UPDATE t1 SET `int1_unique` = `int1_unique` + 50 + CONNECTION_ID(),
`int2_unique` = `int2_unique` - 50 - CONNECTION_ID(),
`id` = 11,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record into locked portion of index:
START TRANSACTION;
*** Disabling result log (result will vary)
SELECT * FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` FOR UPDATE;
SELECT @pk:=`pk` FROM t1 WHERE `int1_key` > 1030 ORDER BY `int1_key`, `pk` LIMIT 1;
*** Enabling result log
UPDATE t1 SET `int1_key` = `int1_key` + 50,
`int2_key` = `int2_key` - 50,
`id` = 12,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record inside locked portion of index (move it but stay inside the locked range):
START TRANSACTION;
*** Disabling result log (result will vary)
SELECT * FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` FOR UPDATE;
SELECT @pk:=`pk` FROM t1 WHERE `int1_key` BETWEEN 981 + 10 + (CONNECTION_ID() MOD 15) AND 1019 ORDER BY `int1_key`, `pk` LIMIT 1;
*** Enabling result log
UPDATE t1 SET `int1_key` = `int1_key` - 10,
`int2_key` = `int2_key` + 10,
`id` = 13,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record outside existing index boundary (max):
START TRANSACTION;
*** Disabling result log (results will vary)
SELECT @max:=MAX(`int2_key`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
SELECT @old:=`int2_key`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
*** Enabling result log
UPDATE t1 SET `int2_key` = @max + 1,
`int2` = `int2` - (@max + 1 - @old),
`id` = 14,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record outside existing UNIQUE index boundary (max):
START TRANSACTION;
*** Disabling result log (results will vary)
SELECT @max:=MAX(`int2_unique`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
SELECT @old:=`int2_unique`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
*** Enabling result log
UPDATE t1 SET `int2_unique` = @max + 1,
`int2` = `int2` - (@max + 1 - @old),
`id` = 15,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record outside existing index boundary (min):
START TRANSACTION;
*** Disabling result log (results will vary)
SELECT @min:=MIN(`int1_key`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
SELECT @old:=`int1_key`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
*** Enabling result log
UPDATE t1 SET `int1_key` = @min - 1,
`int1` = `int1` - (@min - 1 - @old),
`id` = 16,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record outside existing UNIQUE index boundary (min):
START TRANSACTION;
*** Disabling result log (results will vary)
SELECT @min:=MIN(`int1_unique`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
SELECT @old:=`int1_unique`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
*** Enabling result log
UPDATE t1 SET `int1_unique` = @min - 1,
`int1` = `int1` - (@min - 1 - @old),
`id` = 17,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
*** Move record forward in index (add some number):
START TRANSACTION;
UPDATE t1 SET `int2_key` = `int2_key` + 16,
`int2` = `int2` - 16,
`id` = 18,
`connection_id` = CONNECTION_ID(),
`thread_id` = 0
WHERE `pk` = CONNECTION_ID() MOD 1000;
*** Move record backward in index (subtract some number):
UPDATE t1 SET `int1_key` = `int1_key` - 16,
`int1` = `int1` + 16,
`id` = 18,
`connection_id` = CONNECTION_ID(),
`thread_id` = 0
WHERE `pk` = CONNECTION_ID() + 16 MOD 1000;
COMMIT;

View File

@@ -0,0 +1,11 @@
SET autocommit = 0;
START TRANSACTION;
SET @conn_id = CONNECTION_ID(), @thread_id = 0;
UPDATE t1 SET `int1` = `int1` - 4,
`id` = 4,
`is_consistent` = 0,
`connection_id` = @conn_id,
`thread_id` = @thread_id
WHERE `pk` = 4;
*** UPDATEing row with pk = 4 and `int1_key` +=4 if above statement succeeded (query log disabled)...
COMMIT;

View File

@@ -0,0 +1,14 @@
rr_u_10-19_nolimit
rr_u_4
rr_id_3
rr_i_40-44
rr_id_900
rr_iud_rollback-multi-50
rr_insert_select_2
rr_replace_7-8
rr_sc_sum_total
rr_sc_select-same_2
rr_sc_select-limit-nolimit_4
rr_s_select-uncommitted
rr_c_count_not_zero

View File

@@ -0,0 +1,340 @@
#!/usr/bin/perl
################################################################################
#
# This script runs the transactional stress test "stress_tx_rr" against the
# transactional storage engine and looks for errors in two log files:
# var/stress/<timestamp>/mysql-stress-test.log
# var/log/master.err
#
# The script assumes current working dir is mysql-test/.
#
# Regarding the server error log, currently only error lines containing the
# string "Error:" will be reported as a critical error, in addition to signs
# of crashes.
#
# In the stress test log, all lines matching the regex "S\d:" (denoting an
# error with a specified severity) will be reported as errors.
#
# Error information including the full server log in the case of server crash
# is output to standard out.
#
# This script is, and should remain, silent if no errors are detected.
#
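# Example invocation, run from mysql-test/ (the script path is assumed here,
# based on the directory mentioned in add_engine_help() below):
#   perl suite/engines/rr_trx/run_stress_rr.pl --engine=InnoDB --duration=1800 --threads=20
#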
################################################################################
use File::Find;
use File::Spec;
use Cwd;
use Cwd 'abs_path';
use Getopt::Long;
# Checking script is run from the correct location
if (! -f "mysql-test-run.pl") {
print("\nERROR: This script should be run from the \'\<INSTALL_DIR\>/mysql-test\' directory.\n");
exit(1);
}
$runlog="rr_trx.log";
my $errorFound;
my $installdir=abs_path(File::Spec->updir());
my $f=abs_path($0);
my ($v,$d,$fname)=File::Spec->splitpath($f); # only $v and $d are used below
my $testsuitedir=$v.$d;
################################################################################
# Run stress test, redirect output to tmp file.
# Duration is specified in seconds. Some nice values:
# 5 minutes = 300
# 30 minutes = 1800
# 1 hour = 3600
# 2 hours = 7200
# 5 hours = 18000
# 12 hours = 43200
#
################################################################################
$opt_duration=600;
# Special handling for the InnoDB plugin
$plugin_params="\"--plugin-load=innodb=ha_innodb_plugin.so;innodb_trx=ha_innodb_plugin.so;innodb_locks=ha_innodb_plugin.so;innodb_cmp=ha_innodb_plugin.so;innodb_cmp_reset=ha_innodb_plugin.so;innodb_cmpmem=ha_innodb_plugin.so;innodb_cmpmem_reset=ha_innodb_plugin.so\"";
$plugin_params=~s/so/dll/g if (windows());
$opt_help="";
$opt_try="";
$opt_engine="";
$opt_threads=10;
# Collect command line options
GetOptions("engine:s" => \$opt_engine,
"duration=i" => \$opt_duration,
"threads=i" => \$opt_treads,
"try", "help") || usage();
if ($opt_help) { usage(); }
if (!$opt_engine) {
print("\nERROR: --engine=\<engine\> argument is required!!!\n");
usage();
}
# setting specific engine parameters
$engine_options="";
# for innodb engine
if ($opt_engine eq "InnoDB") {
$engine_options=
"--mysqld=--innodb " .
"--mysqld=--innodb-lock-wait-timeout=2 " .
" ";
}
elsif ($opt_engine eq "InnoDB_plugin") {
$engine_options=
"--mysqld=--innodb " .
"--mysqld=--ignore-builtin-innodb " .
#"--mysqld=--plugin_dir=".$installdir."/lib " .
"--mysqld=--plugin_dir=".$installdir."/storage/innodb_plugin/.libs " .
"--mysqld=--innodb-lock-wait-timeout=2 " .
"--mysqld=".$plugin_params." " .
" ";
}
# add parameters for a new engine by modifying the 'elsif' section below
elsif ($opt_engine eq "zz") {
$engine_options=
" ";
}
else {
print("\nERROR: '".$opt_engine."' - unknown engine\n");
add_engine_help();
}
# From this point forward there is no difference between the built-in InnoDB and the plugin
$opt_engine='InnoDB' if ($opt_engine eq 'InnoDB_plugin');
# checking that custom files for that engine exist
$engine_lower= lc($opt_engine);
$missing=0;
if (!-f $testsuitedir.'init_'.$engine_lower.'.txt') {
print("\nERROR: config file 'init_".$engine_lower.".txt' missing.");
$missing=1;
}
if (!-f $testsuitedir.'t/init_'.$engine_lower.'.test') {
print("\nERROR: config file 'init_".$engine_lower.".test' missing.");
$missing=1;
}
if (!-f $testsuitedir.'r/init_'.$engine_lower.'.result') {
print("\nERROR: config file 'init_".$engine_lower.".result' missing.");
$missing=1;
}
add_engine_help() if ($missing);
# building the test command line
$cmd="MTR_VERSION=1 " .
"perl ./mysql-test-run.pl " .
"--comment=stress_tx_rr_".$opt_engine." " .
"--stress " .
"--stress-init-file=init_".$engine_lower.".txt " .
"--stress-test-file=run.txt " .
"--stress-suite=engines/rr_trx " .
"--stress-test-duration=".$opt_duration." " .
"--stress-threads=".$opt_threads." " .
"--mysqld=--log-output=file " .
"--mysqld=--sql-mode=no_engine_substitution " .
"--skip-im " .
"--skip-ndb " .
$engine_options .
" > ".$runlog." 2>&1";
# running the test
print("\n Running \'rr_trx\' test with ".$opt_threads." clients\n");
print(" for ".$opt_duration." seconds using the ".$opt_engine." storag engine.\n");
print("\n Log file: ".$runlog."\n");
if ($opt_try) {
print("\nThe following command will execute:\n");
print("$cmd\n\n");
exit(0);
}
system $cmd;
################################################################################
# Check for crash and other severe errors in the server log.
#
################################################################################
# Open log file. If MTR_VERSION=1 this is in var/log/master.err.
# Otherwise, it is in ?... [stress_tx_rr not yet runnable with MTR_VERSION=2]
# Assuming current directory mysql-test/
my $serverlog=getcwd() . "/var/log/master.err";
open(SERVERLOG, $serverlog)
or die "Unable to open $serverlog. Test not run?";
my @servererrors = (); # Lines with "Severe" errors in server error log
my @crash = (); # Empty if no stack trace detected, non-empty otherwise.
# Grep for errors and crashes. Going line-by-line since the file can be large.
while (<SERVERLOG>) {
$line = $_;
push @crash, $line if /This could be because you hit a bug/;
push @servererrors, $line if /Error:/;
}
close(SERVERLOG);
if (@crash) {
# Crash (stack trace) detected in server log.
print "Transactional stress test stress_tx_rr:\n\n";
print "SERVER CRASH DETECTED!\n";
print "Server log: $serverlog printed at the bottom of this log.\n\n";
print "########################################################\n\n";
}
if (@servererrors) {
# "Severe" errors detected. Print error lines to std out
print "CRITICAL ERRORS:\n\n";
foreach $error (@servererrors) {
print $error;
}
print "\n########################################################\n\n";
}
################################################################################
# Check for errors reported by mysql-stress-test.pl. Transactional consistency
# issues are shown as result diffs.
################################################################################
my $dir;
find(\&finddir, cwd); # sets variable $dir
# Open log file
my $logfile="$dir/mysql-stress-test.log";
open(LOGFILE, $logfile)
or die "Unable to open $logfile. Test not run?";
my @errors = ();
my @heading = ();
# Grep for errors. Going line-by-line since the file can be large.
while (<LOGFILE>) {
#push @errors, $_ if ! /No Errors/;
push @errors, $_ if /S\d:/;
push @heading, $_ if /TestID|=====/;
}
close(LOGFILE);
# Print all errors, i.e. all lines matching the severity pattern "S\d:"
if (@errors) {
$errorFound = 1;
print "Stress test main log file: $logfile\n";
print "Errors follow:\n\n";
# First print the heading
foreach $header_line (@heading) {
print $header_line;
}
foreach $error (@errors) {
print $error;
}
}
# If there were errors or a crash in the server log, output the full log.
if (@servererrors or @crash) {
$errorFound = 1;
print "\n########################################################\n\n";
print "Server error log (master.err):\n\n";
open(SERVERLOG, $serverlog)
or die "Unable to open $serverlog!";
while (<SERVERLOG>) {
print $_;
}
close(SERVERLOG);
}
if ($errorFound) {
# Exit with error code != 0 if we found an error.
print("\nTest Completed with errors. \n");
print(" - See ".$runlog." for summary.\n");
print(" - See files under var/stress for details.\n");
exit 1;
}
print("\nTest Completed - See ".$runlog." for details\n");
################################################################################
# Helper routines etc.
#
################################################################################
sub finddir {
my $file = $File::Find::name; # complete path to the file
return unless -d $file; # process directories (-d), not files (-f)
return unless $_ =~ m/^\d{14}$/; # check if file matches the timestamp regex,
# must be 14 digits
$dir=$file;
#$dir= $_; # $_ = just the file name, no path
return $_;
}
sub usage
{
print <<EOF;
SYNTAX $0 --engine=<engine> [--duration=<nn>] [--threads=<nn>] [--try]
--engine=<engine>
The engine used to run the test. \<engine\> needs to be provided exactly as
it is reported in the SHOW ENGINES command.
EXCEPTION: In order to use the InnoDB plugin, specify 'InnoDB_plugin'
Required option.
--duration=nn
The time the test should run for, in seconds. Default value is 600 seconds (10 minutes).
Optional parameter
--threads=nn
The number of clients used by the test driver. Default value is 10.
Optional parameter
--try
Do not run the actual test but show what will be run
Optional parameter
EOF
exit(0);
}
sub add_engine_help
{
print <<EOF;
\nThis test can be run against any transactional engine. However, the scripts need to be modified in order
to support such engines (support for InnoDB is provided as an example).
In order to add support for a new engine, you will need to modify scripts as follows:
1) cd to INSTALL_DIR/mysql-test/suite/engines/rr_trx
2) Modify the 'run_stress_rr.pl' file by adding an 'elsif' section for your engine and have it
include specific values required to be passed as startup parameters to the MySQL server by
specifying them using "--mysqld" options (see InnoDB example).
3) Copy the 'init_innodb.txt' file to 'init_<engine>.txt' and change its content to be "init_<engine>".
4) In the 't' directory copy the "init_innodb.test" file to "init_\<engine\>.test" and change the value of
the '\$engine' variable to \<engine\>.
5) In the 'r' directory copy the "init_innodb.result" file to "init_\<engine\>.result" and change references
to 'InnoDB' to \<engine\>.
EOF
exit(0);
}
sub windows {
if (
($^O eq 'MSWin32') ||
($^O eq 'MSWin64')
) {
return 1;
} else {
return 0;
}
}

View File

@@ -0,0 +1,5 @@
--source include/have_innodb.inc
let $engine = 'InnoDB';
--source suite/engines/rr_trx/include/rr_init.test

View File

@@ -0,0 +1,28 @@
################################################################################
#
# Verify that SELECT COUNT(*) FROM t1 is never 0.
# This should hold because we take care not to delete all rows from that table.
#
# There have been bugs in the past where this query sporadically has returned
# 0 for non-empty tables.
#
################################################################################
SET autocommit = 0;
START TRANSACTION;
--echo *** Running query: SELECT COUNT(*) FROM t1
let $count= `SELECT COUNT(*) FROM t1`;
if (!$count)
{
# count was zero (0)
--echo FAIL! SELECT COUNT(*) returned 0 - this should never happen.
}
# Check that the count of rows with pk divisible by 5 is constant.
# This should hold because
# a) We do not delete rows with pk MOD 5 = 0
# b) We cannot insert new pk's below the initial auto_increment counter (see init test).
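# (Given the BETWEEN 1 AND 1000 bound below, these are the 200 pk values 5, 10, ..., 1000.)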
SELECT COUNT(*) FROM t1 WHERE `pk` MOD 5 = 0 AND `pk` BETWEEN 1 AND 1000;
--sleep 1
SELECT COUNT(*) FROM t1 WHERE `pk` MOD 5 = 0 AND `pk` BETWEEN 1 AND 1000;
COMMIT;

View File

@@ -0,0 +1,5 @@
# We want to check the number of deadlocks, lock wait timeouts, etc. that have
# been counted so far.
#
SELECT * FROM statistics;

View File

@@ -0,0 +1,133 @@
################################################################################
#
# INSERT
#
# INSERT and commit new rows, using the constants "40" for most values.
# For each new transaction, the constant is increased by 1.
#
# This test runs a number of consecutive transactions to generate high
# concurrency:
#
# Tx 1:
# - multi-statement insert, inserting first positive then negative number (0-sum).
#
# Tx 2:
# - insert multiple rows using a single statement.
#
# Tx 3:
# - INSERT IGNORE using both known duplicate values and non-duplicates.
#
# Net effect: 6 more rows
#
# In this test we need some kind of valid unique integer value for the columns
# with unique indexes.
#
# Alternatively:
# - Set unique value as 0 and rollback if ERR_DUP_KEY (see
# check_for_error_rollback.inc), then make sure to UPDATE where unique value is 0
# in other tests.
# - OR: insert NULL (requires special handling when calculating row sums in
# other tests).
# - OR: skip unique indexes entirely (except `pk`) (remove from t1 in init).
#
# Using CONNECTION_ID (switching sign and doing +/- 3) as unique value, meaning
# that some of the INSERTs will fail with duplicate key until this is high
# enough (should not take long with a relatively high number of threads and some
# duration, given that the number of initial rows is relatively low, ~1000).
# Let's just say this is a warm-up period.
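# (Illustration: with CONNECTION_ID() = 12, the transactions below use the
# unique values 12, 13, 14 and 15 (negated for the int2_unique column),
# plus (12 + 1000) MOD 5000 = 1012 in the final INSERT IGNORE.)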
#
# Alternatively, add some random integer to the value or use UNIX_TIMESTAMP()
# (the latter requires that some care is taken in subsequent updates etc. For
# example, simply doubling the value will cause overflow/truncation).
#
# No need to ROLLBACK if all statements in a transaction by themselves are
# consistent.
#
#
################################################################################
SET autocommit = 0;
START TRANSACTION;
--echo
--echo *** multi-statement insert, inserting first positive then negative number:
--echo
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (40, 40, 40, CONNECTION_ID(),
-40, -40, -CONNECTION_ID(),
0, CONNECTION_ID(), 0, 0, 1);
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (-40, -40, -40, -CONNECTION_ID(),
40, 40, CONNECTION_ID(),
0, CONNECTION_ID(), 0, 0, 1);
COMMIT;
START TRANSACTION;
--echo
--echo *** insert multiple rows using a single statement:
--echo
# First row is by itself consistent (sum = 0). Row 3 zero-sums row 2, so the
# statement itself is consistent.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (41, 41, 41, CONNECTION_ID()+1,
-41, -41, -(CONNECTION_ID()+1),
0, CONNECTION_ID(), 0, 0, 1),
(41, 41, 41, CONNECTION_ID()+2,
41, 41, CONNECTION_ID()+2,
0, CONNECTION_ID(), 0, 0, 0),
(41, -41, -41, -(CONNECTION_ID()+2),
-41, -41, -(CONNECTION_ID()+2),
0, CONNECTION_ID(), 0, 0, 0);
COMMIT;
START TRANSACTION;
--echo
--echo *** INSERT IGNORE using both known duplicate values and non-duplicates:
--echo
# This MAY be discarded (duplicate entry in UNIQUE index) - should succeed if CONNECTION_ID is high enough (int*_unique).
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
INSERT IGNORE INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (42, 42, 42, CONNECTION_ID()+3,
-42, -42, -(CONNECTION_ID()+3),
0, CONNECTION_ID(), 0, 0, 1);
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT
# This WILL (SHOULD) be discarded (duplicate entry as primary key).
# pk's that are 1000 or less AND divisible by 5 should all be present (i.e. never deleted), so we pick pk 5.
# Note that we insert an inconsistent row, so it will show up as a sum anomaly if it succeeds.
INSERT IGNORE INTO t1 (`pk`, `id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (5, 43, 42, 42, CONNECTION_ID(),
-42, -42, CONNECTION_ID(),
0, CONNECTION_ID(), 0, 0, 0);
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT
# This MAY be discarded (duplicate entry in UNIQUE index).
INSERT IGNORE INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (44, 42, 42, (CONNECTION_ID() + 1000) MOD 5000,
-42, -42, -((CONNECTION_ID() + 1000) MOD 5000),
0, CONNECTION_ID(), 0, 0, 1);
--source suite/engines/rr_trx/include/check_for_error_rollback.inc
COMMIT;

View File

@@ -0,0 +1,31 @@
################################################################################
#
# Insert a new record. Then delete the same record, in the same tx.
# Mind any tx errors, ROLLBACK if needed to "equalize" different engines.
#
################################################################################
SET autocommit = 0;
START TRANSACTION;
# Flag to indicate error (use this to decide if we are going to delete after insert).
let $error= 0;
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT
eval INSERT INTO t1 (`id`, `int1`, `connection_id`, `is_uncommitted`)
VALUES (3, 3, CONNECTION_ID(), 1);
--source suite/engines/rr_trx/include/check_for_error_rollback.inc
if(!$error)
{
# Conditional, so skip query log
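# @@last_insert_id holds the auto-generated `pk` of the row inserted above, so
# this DELETE removes exactly that row (the extra `id` = 3 predicate is a sanity check).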
--disable_query_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT
DELETE FROM t1 WHERE `pk` = (SELECT @@last_insert_id) AND `id` = 3;
--source suite/engines/rr_trx/include/check_for_error_rollback.inc
--enable_query_log
}
COMMIT;

View File

@@ -0,0 +1,155 @@
################################################################################
#
# DELETE
#
# DELETE existing rows and commit. Only pk's above 900 are touched (the initial insert created 1000 pk's).
#
# DO NOT DELETE ROWS WITH (PK MOD 5) = 0 (PK's evenly divisible by 5). We
# count these as a consistency check in other tests.
#
# This test runs a number of consecutive transactions (to allow for high
# concurrency):
#
# Tx 1:
# - DELETE a row and INSERT it immediately with same pk.
# Tx 2:
# - DELETE a row and INSERT it immediately with pk = NULL.
#
# Tx 3:
# - DELETE two rows so that the total table sum does not change.
#
# Net effect: 2 fewer rows (if no errors)
#
# Roll back the entire transaction if a statement that subsequent statements
# depend on results in an error. This is to maintain consistency (zero-sum tx).
#
################################################################################
SET autocommit = 0;
START TRANSACTION;
--echo
--echo *** Delete a row and re-insert with same `pk`:
--echo
#
# Get pk and unique ints of an existing row that is internally consistent.
# (Note: If any int field values may be NULL, handle this by e.g. using COALESCE)
# Re-using unique values in an effort to avoid rollbacks due to duplicate keys.
#
# NOTE: Because we maintain 0-sum consistency by "cancelling out" the deleted row with a new
# row later, we need to make sure we know exactly what we are deleting, thus using
# the FOR UPDATE clause.
--echo *** Disabling result log
--disable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT @pk:=`pk`,
@unique1:=`int1_unique`,
@unique2:=`int2_unique`
FROM t1 WHERE `pk` MOD 5 = 4 AND `pk` > 900 AND `is_consistent` = 1 LIMIT 1 FOR UPDATE;
--echo *** Enabling result log
--enable_result_log
--source suite/engines/rr_trx/include/check_for_error_rollback_skip.inc
# Delete the row. We maintain 0-sum tx consistency by inserting ints that will cancel out
# the unique values (which we are keeping) later in this transaction.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
DELETE FROM t1 WHERE `pk` = @pk;
--source suite/engines/rr_trx/include/check_for_error_rollback_skip.inc
--echo *** Doing insert of row with pk = @pk if above statement succeeded (query log disabled)...
# Note that affected rows may be 0 if some other thread changed the row in the meantime - still
# we get no error. We work around this by using FOR UPDATE to lock the row (see above).
if(!$error)
{
# Insert a new row with the same sum of integers.
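# Zero-sum check for the VALUES below:
# 900 + (-900) + @unique1 + (-(@unique1 + @unique2)) + 0 + @unique2 = 0.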
# This is conditional, so keep it out of the query log.
--disable_query_log
# If some other thread is doing the same thing at the same time, we may get duplicate key error
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
INSERT INTO t1 (`pk`, `id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (@pk, 900, 900, -900, @unique1,
-(@unique1+@unique2), 0, @unique2,
0, CONNECTION_ID(), 0, 0, 1);
--source suite/engines/rr_trx/include/check_for_error_rollback_skip.inc
--enable_query_log
}
COMMIT;
########################
# TRANSACTION 2
########################
START TRANSACTION;
--echo
--echo *** Delete a row and re-insert with `pk` = NULL:
--echo
--echo *** Disabling result log
--disable_result_log
# In order not to pick the same row as above, try to pick a different pk.
# Use FOR UPDATE to make sure we maintain 0-sum consistency throughout the transaction.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT @pk:=`pk`,
@unique1:=`int1_unique`,
@unique2:=`int2_unique`
FROM t1 WHERE `pk` MOD 5 = 4 AND `pk` > 901 AND `is_consistent` = 1 LIMIT 1 FOR UPDATE;
--echo *** Enabling result log
--enable_result_log
--source suite/engines/rr_trx/include/check_for_error_rollback_skip.inc
# Delete the row. We maintain 0-sum tx consistency by inserting ints that will cancel out
# the unique values (which we are keeping) later in this transaction.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
DELETE FROM t1 WHERE `pk` = @pk;
--source suite/engines/rr_trx/include/check_for_error_rollback.inc
--echo *** Doing insert of row with pk = NULL if above statement succeeded (query log disabled)...
if(!$error)
{
# Insert a new row with the same sum of integers.
# This is conditional, so keep it out of the query log.
--disable_query_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
INSERT INTO t1 (`pk`, `id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (NULL, 901, 901, -901, @unique1,
-(@unique1+@unique2), 0, @unique2,
0, CONNECTION_ID(), 0, 0, 1);
--source suite/engines/rr_trx/include/check_for_error_rollback.inc
--enable_query_log
}
COMMIT;
########################
# TRANSACTION 3
########################
START TRANSACTION;
# By identifying rows with total row sum = 0, we know that deleting such rows
# won't affect the total table sum (used for consistency check).
# Such rows should have been marked with `is_consistent` = 1.
--echo
--echo *** Delete up to two (2) "consistent" rows (zero-sum; table sum unchanged)
--echo
# We get Warning "1592: Statement is not safe to log in statement mode" with server 5.1
# due to LIMIT (see Bug#42415 and Bug#42851).
--disable_warnings
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
DELETE FROM t1 WHERE `pk` > 902 AND `pk` MOD 5 = 3 AND `is_consistent` = 1 LIMIT 2;
--enable_warnings
COMMIT;

View File

@@ -0,0 +1,50 @@
################################################################################
#
# Executes INSERT ... SELECT statements.
#
################################################################################
SET autocommit = 0;
START TRANSACTION;
# We need this to determine how many rows to update after insert (we may insert multiple rows).
let $pk_before= `SELECT MAX(`pk`) FROM t1`;
# Insert rows unless we have duplicate values in unique indexes.
# WHERE conditions are more or less arbitrary.
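# Note: the two ON DUPLICATE KEY UPDATE offsets below mirror each other
# (+/- (CONNECTION_ID() + 1000)), so the updated pair of unique values still
# sums to src.`int1_unique` + src.`int2_unique`.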
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD, ER_DUP_ENTRY
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `is_uncommitted`, `is_consistent`)
SELECT src.`id`, src.`int1`, src.`int1_key`, src.`int1_unique`,
src.`int2`, src.`int2_key`, src.`int2_unique`,
src.`for_update`, src.`is_uncommitted`, src.`is_consistent`
FROM t1 AS src
WHERE (src.`pk` BETWEEN 1000 AND 1049) AND (src.`id` > 0) AND (src.`is_consistent` = 1) AND (src.`int1_unique` MOD 8 = 0)
ON DUPLICATE KEY UPDATE `int1_unique`= src.`int1_unique` + CONNECTION_ID() + 1000, `int2_unique`= src.`int2_unique` - (CONNECTION_ID()+1000);
--source suite/engines/rr_trx/include/check_for_error_rollback.inc
# ROW_COUNT may be negative. If positive (and non-zero), update the rows we inserted.
let $rows = `SELECT @rows:=ROW_COUNT()`;
# Make sure $rows is never negative (to avoid an infinite loop below).
if(`SELECT IF(@rows < 1, 1, 0)`)
{
let $rows = 0;
}
--echo *** Updating id and connection_id if we actually inserted something (query log disabled)...
# Conditional, so skip query log
--disable_query_log
# We risk duplicate entries... in that case @@last_insert_id is 0
while ($rows)
{
# We actually inserted a row, so update id, conn_id, timestamp
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
eval UPDATE t1 SET `connection_id` = CONNECTION_ID(), `id` = 2 WHERE `pk` = $pk_before + $rows;
--dec $rows
}
--enable_query_log
COMMIT;

View File

@@ -0,0 +1,94 @@
################################################################################
#
# Purpose: Execute changes to t1 and roll back to potentially mess up
# REPEATABLE READ queries in other transactions.
#
# - Start transaction
# - Do "bogus" updates
# - ROLLBACK
# - Do "bogus" inserts
# - Do bogus deletes
# - ROLLBACK
#
# Handles rows with pk between 500 and 550.
#
################################################################################
# TODO: Handle tx errors (possible deadlocks?)
SET autocommit = 0;
START TRANSACTION;
--echo *** All changes done in this test / transaction will be rolled back.
let $conn_id= `SELECT CONNECTION_ID()`;
# "Bogus" updates:
# Disabling query log to avoid complex filtering of output in order to keep
# result file diffing clean.
--echo *** Disabling query log.
--disable_query_log
--echo *** Executing UPDATE on rows 501 -- 549 with id = 50, single statement.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
eval UPDATE t1 SET `int1` = 50,
`int1_key` = `int1_key` + 50,
`int2` = `int2` - 50,
`id` = 50,
`is_consistent` = 0,
`is_uncommitted` = 1,
`connection_id` = $conn_id
WHERE `pk` > 500 AND `pk` < 550;
--echo *** Executing UPDATE on rows 526 -- 549 with id = 50, single statement.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
eval UPDATE t1 SET `int2_key` = 50,
`id` = 50,
`is_consistent` = 0,
`is_uncommitted` = 1,
`connection_id` = $conn_id
WHERE `pk` > 525 AND `pk` < 550;
# Still "bogus" updates... Now update 25 rows in succession, single field:
--echo *** Executing UPDATE on rows 501 -- 525 with id = 50, multiple statements.
let $updates= 25;
while ($updates)
{
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
eval UPDATE t1 SET `int2_key` = 50,
`id` = 50,
`is_consistent` = 0,
`is_uncommitted` = 1,
`connection_id` = $conn_id
WHERE `pk` = (SELECT 526 - $updates);
dec $updates;
}
--echo *** ROLLBACK
ROLLBACK;
--echo *** START TRANSACTION
START TRANSACTION;
# Now do some "bogus" inserts:
--echo *** Executing INSERTs of rows with id = 50, 2 statements.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
eval INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`, `connection_id`)
VALUES (50, 505050, -505050, 505050, $conn_id);
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
eval INSERT INTO t1 (`id`, `int2`, `int2_key`, `int2_unique`, `connection_id`)
VALUES (50, -505050, 505050, -505050, $conn_id);
# And some "bogus" deletes:
--echo *** Executing DELETE of rows with pk between 449 and 540, single statement.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
DELETE FROM t1 WHERE `pk` BETWEEN 449 AND 540;
--echo *** Enabling query log.
--enable_query_log
ROLLBACK;

View File

@@ -0,0 +1,101 @@
#######################################################################################
#
# Executes REPLACE statements (effectively INSERT, or DELETE then INSERT if
# a duplicate value for a unique index or primary key is specified).
#
# Transaction 1: REPLACE INTO
# Transaction 2: REPLACE INTO SELECT
#
# NOTE: We should not delete rows where pk MOD 5 = 0 AND pk < 1001
# (consistency check, see rr_c_count_not_zero).
# So, make sure to insert/replace rows with pk > 1000, etc.
#
# Note that one REPLACE insert may replace more than one row, if we have
# multiple columns with unique indexes.
# Vary the values for unique columns so that we don't replace the same row every time.
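# (Illustration with a hypothetical table t, not executed here: given separate
#  UNIQUE keys on u1 and u2, the statement
#    REPLACE INTO t (pk, u1, u2) VALUES (1, 10, 20);
#  first deletes every existing row that collides on pk = 1, u1 = 10 or u2 = 20,
#  up to three rows in total, and then inserts the new row.)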
######################################################################################
SET autocommit = 0;
###########################
# Transaction 1
###########################
START TRANSACTION;
# In order to avoid replacing a "reserved" row (pk mod 5 = 0 AND pk <= 1000), we need
# to pick a row to replace which fulfills this requirement (we cannot replace an arbitrary row).
# We also should not replace internally inconsistent rows, as this would not be a zero-sum transaction.
# We select a pk between 200 and 1200 depending on conn_id, not including those where pk MOD 5 = 0.
# This will cost an extra roundtrip and reduce concurrency, but there is not much else to do apart
# from not having unique indexes in the table.
# Using FOR UPDATE to avoid letting other threads change the unique values or the row's consistency
# properties in the meantime.
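# (The mirrored NOT IN check on -`int1_unique` exists because the REPLACE below
# sets `int2_unique` = -@unique, so the negated value must be collision-free as well.)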
--echo *** Disabling result log
--disable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT @pk:=`pk`, @unique:=`int1_unique`
FROM t1
WHERE `pk` MOD 5 <> 0
AND `pk` > 200 + (CONNECTION_ID() MOD 1000)
AND `int1_unique` NOT IN (SELECT `int1_unique` FROM t1 WHERE (`pk` < 1000 AND `pk` MOD 5 = 0) OR `is_consistent` = 0)
AND -`int1_unique` NOT IN (SELECT `int2_unique` FROM t1 WHERE (`pk` < 1000 AND `pk` MOD 5 = 0) OR `is_consistent` = 0)
AND `is_consistent`= 1
LIMIT 1 FOR UPDATE;
--echo *** Enabling result log
--enable_result_log
--source suite/engines/rr_trx/include/check_for_error_rollback_skip.inc
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
REPLACE INTO t1 SET `pk` = @pk,
`id` = 7,
`int1` = 7,
`int1_key` = -7,
`int1_unique` = @unique,
`int2` = -7,
`int2_key` = 7,
`int2_unique` = -@unique,
`connection_id` = CONNECTION_ID(),
`is_consistent` = 1;
COMMIT;
###########################
# Transaction 2
###########################
START TRANSACTION;
# Same rules apply as in previous transaction. Do not replace a "reserved" or inconsistent row.
# We get Warning "1592: Statement is not safe to log in statement mode" with server 5.1
# due to LIMIT (see Bug#42415 and Bug#42851).
--disable_warnings
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
REPLACE INTO t1
SELECT * FROM t1
WHERE `pk` > 1000 + CONNECTION_ID() MOD 777
AND `int1_unique` NOT IN (SELECT `int1_unique` FROM t1 WHERE `pk` < 1000 OR `is_consistent` = 0)
AND `int2_unique` NOT IN (SELECT `int2_unique` FROM t1 WHERE `pk` < 1000 OR `is_consistent` = 0)
AND `pk` MOD 5 <> 0
AND `is_consistent` = 1
ORDER BY `pk` LIMIT 1;
--enable_warnings
# Conditional, so skip query log:
--disable_query_log
--echo *** Updating replaced row (if applicable)
# Update id, conn_id if we successfully replaced a row.
if (`SELECT IF(ROW_COUNT() > 0, 1, 0)`)
{
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `id` = 8, `connection_id` = CONNECTION_ID() WHERE `pk` = @@last_insert_id;
--source suite/engines/rr_trx/include/check_for_error_rollback.inc
}
--enable_query_log
COMMIT;

View File

@@ -0,0 +1,13 @@
################################################################################
#
# No uncommitted changes should be visible to a REPEATABLE-READ transaction
#
################################################################################
SET autocommit = 0;
START TRANSACTION;
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT
SELECT * FROM t1 WHERE `is_uncommitted` > 0;
COMMIT;

View File

@@ -0,0 +1,62 @@
################################################################################
#
# Part of checking that REPEATABLE-READ transactions are indeed repeatable read.
#
# This test executes several queries in the same transaction which should return
# the exact same results. Some of the queries will use LIMIT, others will not.
#
# In order to trigger Falcon's LIMIT optimization we need to ORDER BY an indexed
# column, use a WHERE range predicate and include the LIMIT keyword.
#
################################################################################
SET autocommit = 0;
START TRANSACTION;
--echo Comparing results from 2 queries (unless we deadlock or some such)...
# Without LIMIT
let $query= SELECT * FROM t1 WHERE `pk` > 1000 ORDER BY `int1_key`;
--echo *** Query 1: $query
--source suite/engines/rr_trx/include/record_query_all_columns.inc
--echo *** Filler:
SELECT SLEEP(1);
# With LIMIT. We assume that t1 will not have more than <limit> rows.
# With some large limit, just to exercise the optimization but return the same results:
let $query= SELECT * FROM t1 WHERE `pk` > 1000 ORDER BY `int1_key` LIMIT 987654321;
--echo *** Query 2: $query
# Disable warnings (due to warning about unsafe binlogging with LIMIT).
--disable_warnings
--source suite/engines/rr_trx/include/record_query_all_columns.inc
--enable_warnings
--echo *** Filler: Do something other than sleep while waiting for other transactions to do stuff...
CREATE TEMPORARY TABLE tmpSelectLimitNoLimit (a INT, b VARCHAR(255), c TIMESTAMP, KEY(a));
INSERT INTO tmpSelectLimitNoLimit VALUES
(-1, 'This is a filler', NOW()),
(0, 'More stuff', NOW()),
(999999999, 'Even more bogus data', NOW()),
(-98765, 'Even more bogus data', NOW());
SELECT * FROM tmpSelectLimitNoLimit WHERE a < -99999999 ORDER BY a;
# With LIMIT.
let $query= SELECT * FROM t1 WHERE `pk` > 1000 ORDER BY `int1_key` LIMIT 987654321;
--echo *** Query 3: $query
--disable_warnings
--source suite/engines/rr_trx/include/record_query_all_columns.inc
--enable_warnings
--echo *** Filler:
UPDATE tmpSelectLimitNoLimit SET a = 3;
SELECT SLEEP(1);
# Without LIMIT:
let $query= SELECT * FROM t1 WHERE `pk` > 1000 ORDER BY `int1_key`;
--echo *** Query 4: $query
--source suite/engines/rr_trx/include/record_query_all_columns.inc
# Compare queries
--source suite/engines/rr_trx/include/check_repeatable_read_all_columns.inc
COMMIT;

View File

@@ -0,0 +1,45 @@
################################################################################
#
# This test contains a single transaction performing queries against the test
# data. The main purpose is to test REPEATABLE READ results, that is to verify
# that reads (SELECTs) are indeed repeatable during a REPEATABLE READ
# transaction.
#
# Generally, queries which should yield the same results at one moment in time
# should also yield the same results later in the same transaction. In some
# configurations, however, phantom reads are allowed (may e.g. depend on
# settings such as falcon_consistent_read).
#
# To detect missing rows, modified rows and possibly invalid (uncommitted)
# extra (phantom) rows, we store query results in temporary tables and compare
# using special queries. Using includes for this to avoid unnecessary "clutter"
# in each select-only test.
#
# This and similar tests (transactions) should contain some random "filler"
# between the SELECTs so that they may run in different conditions, such as:
# * some other transactions may have committed in the meantime
# * a scavenge run may have happened (falcon)
# * etc
#
# Such a "random filler" can be:
# * SLEEP()
# * Bogus updates on some unrelated temporary table that was created for the purpose
# * savepoint + bogus updates on the main tables + rollback to savepoint
# * Inserts on new records (which will show up as "phantom" records in subsequent SELECTs)
#
################################################################################
SET autocommit = 0;
START TRANSACTION;
# We do a full table scan in this test.
# Note: May cause deadlocks! (handled in sourced scripts).
let $query= SELECT * from t1;
--source suite/engines/rr_trx/include/record_query_all_columns.inc
SELECT SLEEP(1);
let $query= SELECT * from t1;
--source suite/engines/rr_trx/include/record_query_all_columns.inc
--source suite/engines/rr_trx/include/check_repeatable_read_all_columns.inc
COMMIT;

View File

@@ -0,0 +1,22 @@
SELECT @sum:=SUM(`int1` + `int1_key` + `int1_unique`
+ `int2` + `int2_key` + `int2_unique`)
AS TotalSum
FROM t1;
# Get some diagnostics if sum is non-zero
if (`SELECT @sum`)
{
# Get sum of consistent rows (should be 0)
SELECT SUM(`int1` + `int1_key` + `int1_unique` + `int2` + `int2_key` + `int2_unique`) FROM t1 where `is_consistent` = 1;
# Get sum of inconsistent rows (should be non-0 if the total sum is to be non-0)
SELECT SUM(`int1` + `int1_key` + `int1_unique` + `int2` + `int2_key` + `int2_unique`) FROM t1 where `is_consistent` = 0;
# Get number of inconsistent rows
SELECT COUNT(*) FROM t1 WHERE `is_consistent` = 0;
# See if there is a single row which is the culprit
SELECT * FROM t1 WHERE `int1` + `int1_key` + `int1_unique` + `int2` + `int2_key` + `int2_unique` = @sum;
SELECT * FROM t1 WHERE `int1` + `int1_key` + `int1_unique` + `int2` + `int2_key` + `int2_unique` = -@sum;
}

View File

@@ -0,0 +1,406 @@
######################################################################################
#
# Using UPDATE statements in order to:
# - move a record outside the current index extremities (min and max values).
# - move a record forward and backward in the index (add and subtract some number).
# - move a record into, out of and inside a locked index interval.
#
# This test is using FOR UPDATE to lock index ranges and to make sure we do not
# base new values on old record versions (in the face of concurrent updates).
#
# Need to handle any tx errors, ROLLBACK if needed to maintain table consistency.
#
# This test runs several transactions, each transaction executing one or more
# UPDATE statements and potentially other helping SELECT queries.
#
# If we pick a row (pk) at random that does not exist, it does not matter (will try
# again next time), but we should probably keep this to a minimum.
#
# We need a way to maintain table consistency when updating a field with an arbitrary
# number. Using this algorithm:
# * We need to know: How much does the table sum change with this update?
# * Change is: <new value> - <old value>.
# * We must then add back the negative of that to a different field in the table.
#
# Example: Columns a and b with values a = a1, b = b1
# We want to update a to a2. We need to figure out what b2 should be.
# - Save a1 (e.g. as user variable)
# - Update a to a2 and b to b2 = b - (a2 - a1)
# - In other words: a changed with a2 - a1.
# b changed with b2 - b1 = b1 - (a2 - a1) - b1 = -(a2 - a1)
# => Zero-sum change.
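#
# Concretely (made-up numbers): a1 = 10, b1 = 5, and we update a to a2 = 17.
# Then b2 = 5 - (17 - 10) = -2. Table sum before: 10 + 5 = 15; after:
# 17 + (-2) = 15. Unchanged, i.e. a zero-sum change.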
#
# NOTE: Consider splitting this up into multiple test files if we get too many
# skips due to locking errors (see check_for_error_rollback_skip.inc).
######################################################################################
SET autocommit = 0;
###################
# Transaction 1
###################
--echo
--echo *** Move record out of locked portion of index:
--echo
START TRANSACTION;
--echo *** Disabling result log (result will vary)
--disable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` LIMIT 10 FOR UPDATE;
# Even if SELECT FOR UPDATE failed, we can continue - we just don't necessarily move the row out of locked portion of index.
# We (may) have locked some records (if any were found).
# Set an int1_key to a value outside of this range.
# First pick a pk. We may use this later in the transaction.
SELECT @pk:=`pk` FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` LIMIT 1;
--echo *** Enabling result log
--enable_result_log
# We should mark row as consistent if the row-sum is 0.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int1_key` = `int1_key` + 50,
`int2_key` = `int2_key` - 50,
`id` = 10,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
###################
# Transaction 2
###################
--echo
--echo *** Move record out of locked portion of UNIQUE index:
--echo
START TRANSACTION;
--echo *** Disabling result log (result will vary)
--disable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `int1_unique` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_unique` LIMIT 10 FOR UPDATE;
# Even if SELECT FOR UPDATE failed, we can continue - we just don't necessarily move the row out of locked portion of index.
# We (may) have locked some records (if any were found)
# Set an int1_unique to a value outside of this range.
# First pick a pk to use several times later in the transaction.
SELECT @pk:=`pk` FROM t1 WHERE `int1_unique` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_unique` LIMIT 1;
--echo *** Enabling result log
--enable_result_log
# We should mark row as consistent if the row-sum is 0.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD, ER_DUP_ENTRY
UPDATE t1 SET `int1_unique` = `int1_unique` + 50 + CONNECTION_ID(),
`int2_unique` = `int2_unique` - 50 - CONNECTION_ID(),
`id` = 11,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
###################
# Transaction 3
###################
# Not doing this for unique index (too tricky to avoid DUP_ENTRY...)
--echo
--echo *** Move record into locked portion of index:
--echo
START TRANSACTION;
--echo *** Disabling result log (result will vary)
--disable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` LIMIT 10 FOR UPDATE;
# If the above statement resulted in deadlock we can still continue - the test will just try to do UPDATEs without explicitly locking first.
# We (may) have locked some records (if any were found)
# Move an int1_key value from just above the locked range down into it.
# Pick a pk to use later in the transaction. Select one whose int1_key is just above the locked range.
SELECT @pk:=`pk` FROM t1 WHERE `int1_key` > 1030 ORDER BY `int1_key`, `pk` LIMIT 1;
--echo *** Enabling result log
--enable_result_log
# We should mark row as consistent if the row-sum is 0.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int1_key` = `int1_key` - 50,
`int2_key` = `int2_key` + 50,
`id` = 12,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
###################
# Transaction 4
###################
# Not doing this for unique index (too tricky to avoid DUP_ENTRY...)
--echo
--echo *** Move record inside locked portion of index (move it but stay inside the locked range):
--echo
START TRANSACTION;
--echo *** Disabling result log (result will vary)
--disable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` LIMIT 10 FOR UPDATE;
# If the above statement resulted in deadlock we can still continue - the test will just try to do UPDATEs without explicitly locking first.
# We (may) have locked some records (if any were found)
# Shift an int1_key value within the locked range; it stays inside after the update.
# Pick a pk to use later in the transaction. Select one from the interior of the locked range.
SELECT @pk:=`pk` FROM t1 WHERE `int1_key` BETWEEN 981 + 10 + (CONNECTION_ID() MOD 15) AND 1019 ORDER BY `int1_key`, `pk` LIMIT 1;
--echo *** Enabling result log
--enable_result_log
# We should mark row as consistent if the row-sum is 0.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int1_key` = `int1_key` - 10,
`int2_key` = `int2_key` + 10,
`id` = 13,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
###################
# Transaction 5
###################
--echo
--echo *** Move record outside existing index boundary (max):
--echo
START TRANSACTION;
--echo *** Disabling result log (results will vary)
--disable_result_log
# Get the max value of `int2_key`.
# Pick a random pk value.
# The pk identifies a row that we want to update to move its int2_key value above the current MAX.
SELECT @max:=MAX(`int2_key`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
# Get the current value of `int2_key` of the row we are going to update.
# We need this to be able to calculate values for maintaining table consistency.
# Also, we should mark row as consistent if the row-sum is 0, so calculate the sum of the data ints.
# Hence, we need to lock the row to avoid concurrent modifications.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
# Do not continue if the above FOR UPDATE locking fails - we may end up making the table/row inconsistent.
--source suite/engines/rr_trx/include/check_for_error_rollback_skip.inc
SELECT @old:=`int2_key`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
--echo *** Enabling result log
--enable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int2_key` = @max + 1,
`int2` = `int2` - (@max + 1 - @old),
`id` = 14,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
## Do the same with a UNIQUE index
###################
# Transaction 6
###################
--echo
--echo *** Move record outside existing UNIQUE index boundary (max):
--echo
START TRANSACTION;
--echo *** Disabling result log (results will vary)
--disable_result_log
# Get the max value of `int2_unique`.
# Pick a random pk value.
# The pk identifies a row that we want to update to move its int2_unique value above the current MAX.
SELECT @max:=MAX(`int2_unique`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
# Get the current value of `int2_unique` of the row we are going to update.
# We need this to be able to calculate values for maintaining table consistency.
# Also, we should mark row as consistent if the row-sum is 0, so calculate the sum of the data ints.
# We need to lock the row to avoid concurrent "silent" modifications.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
# Do not continue if the above FOR UPDATE locking fails - we may end up making the table/row inconsistent.
--source suite/engines/rr_trx/include/check_for_error_rollback_skip.inc
SELECT @old:=`int2_unique`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
--echo *** Enabling result log
--enable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD, ER_DUP_ENTRY
UPDATE t1 SET `int2_unique` = @max + 1,
`int2` = `int2` - (@max + 1 - @old),
`id` = 15,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
--source suite/engines/rr_trx/include/check_for_error_rollback_skip.inc
# Verify sum after update:
if(`SELECT IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` <> 0 AND `is_consistent` = 1, 1, 0) FROM t1 WHERE `pk` = @pk`)
{
--echo FAIL - updated row, set is_consistent = 1 but sum is not 0!
SELECT `pk`, `int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` FROM t1 WHERE `pk` = @pk;
}
COMMIT;
###################
# Transaction 7
###################
--echo
--echo *** Move record outside existing index boundary (min):
--echo
START TRANSACTION;
--echo *** Disabling result log (results will vary)
--disable_result_log
# Get the min value of `int1_key`.
# Pick a random pk value.
# The pk identifies a row that we want to update to move its int1_key value below the current MIN.
SELECT @min:=MIN(`int1_key`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
# Get the current value of `int1_key` of the row we are going to update.
# We need this to be able to calculate values for maintaining table consistency.
# Also, we should mark row as consistent if the row-sum is 0, so calculate the sum of the data ints.
# Hence, we need to lock the row to avoid concurrent modifications.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
# Do not continue if the above FOR UPDATE locking fails - we may end up making the table/row inconsistent.
--source suite/engines/rr_trx/include/check_for_error_rollback_skip.inc
SELECT @old:=`int1_key`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
--echo *** Enabling result log
--enable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int1_key` = @min - 1,
`int1` = `int1` - (@min - 1 - @old),
`id` = 16,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
## Do the same with a UNIQUE index
###################
# Transaction 8
###################
--echo
--echo *** Move record outside existing UNIQUE index boundary (min):
--echo
START TRANSACTION;
--echo *** Disabling result log (results will vary)
--disable_result_log
# Get the min value of `int1_unique`.
# Pick a random pk value.
# The pk identifies a row that we want to update to move its int1_unique value below the current MIN.
SELECT @min:=MIN(`int1_unique`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
# Get the current value of `int1_unique` of the row we are going to update.
# We need this to be able to calculate values for maintaining table consistency.
# Also, we should mark row as consistent if the row-sum is 0, so calculate the sum of the data ints.
# Hence, we need to lock the row to avoid concurrent modifications.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
# Do not continue if the above FOR UPDATE locking fails - we may end up making the table/row inconsistent.
--source suite/engines/rr_trx/include/check_for_error_rollback_skip.inc
SELECT @old:=`int1_unique`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
--echo *** Enabling result log
--enable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD, ER_DUP_ENTRY
UPDATE t1 SET `int1_unique` = @min - 1,
`int1` = `int1` - (@min - 1 - @old),
`id` = 17,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
###################
# Transaction 9
###################
--echo
--echo *** Move record forward in index (add some number):
--echo
START TRANSACTION;
# Updating a "random" row.
# Subtract the same number from another field to maintain consistency.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int2_key` = `int2_key` + 16,
`int2` = `int2` - 16,
`id` = 18,
`connection_id` = CONNECTION_ID(),
`thread_id` = 0
WHERE `pk` = CONNECTION_ID() MOD 1000;
## Skip the same with a UNIQUE index (we need to update to > MAX or find some missing value in the middle). See MAX update in previous transactions.
--echo
--echo *** Move record backward in index (subtract some number):
--echo
# Updating a "random" row.
# Add the same number to another field to maintain consistency.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int1_key` = `int1_key` - 16,
`int1` = `int1` + 16,
`id` = 18,
`connection_id` = CONNECTION_ID(),
`thread_id` = 0
WHERE `pk` = (CONNECTION_ID() + 16) MOD 1000;
COMMIT;

View File

@@ -0,0 +1,399 @@
######################################################################################
#
# Using UPDATE statements in order to:
# - move a record outside the current index extremities (min and max values).
# - move a record forward and backward in the index (add and subtract some number).
# - move a record into, out of and inside a locked index interval.
#
# This test is using FOR UPDATE to lock index ranges and to make sure we do not
# base new values on old record versions (in the face of concurrent updates).
#
# Need to handle any tx errors, ROLLBACK if needed to maintain table consistency.
#
# This test runs several transactions, each transaction executing one or more
# UPDATE statements and potentially other helping SELECT queries.
#
# If we pick a row (pk) at random that does not exist, it does not matter (will try
# again next time), but we should probably keep this to a minimum.
#
# We need a way to maintain table consistency when updating a field with an arbitrary
# number. Using this algorithm:
# * We need to know: How much does the table sum change with this update?
# * Change is: <new value> - <old value>.
# * We must then add back the negative of that to a different field in the table.
#
# Example: Columns a and b with values a = a1, b = b1
# We want to update a to a2. We need to figure out what b2 should be.
# - Save a1 (e.g. as user variable)
# - Update a to a2 and b to b2 = b - (a2 - a1)
# - In other words: a changed with a2 - a1.
# b changed with b2 - b1 = b1 - (a2 - a1) - b1 = -(a2 - a1)
# => Zero-sum change.
#
# NOTE: Consider splitting this up into multiple test files if we get too many
# skips due to locking errors (see check_for_error_rollback_skip.inc).
######################################################################################
SET autocommit = 0;
###################
# Transaction 1
###################
--echo
--echo *** Move record out of locked portion of index:
--echo
START TRANSACTION;
--echo *** Disabling result log (result will vary)
--disable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` FOR UPDATE;
# Even if SELECT FOR UPDATE failed, we can continue - we just don't necessarily move the row out of locked portion of index.
# We (may) have locked some records (if any were found).
# Set an int1_key to a value outside of this range.
# First pick a pk. We may use this later in the transaction.
SELECT @pk:=`pk` FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` LIMIT 1;
--echo *** Enabling result log
--enable_result_log
# We should mark row as consistent if the row-sum is 0.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int1_key` = `int1_key` + 50,
`int2_key` = `int2_key` - 50,
`id` = 10,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
###################
# Transaction 2
###################
--echo
--echo *** Move record out of locked portion of UNIQUE index:
--echo
START TRANSACTION;
--echo *** Disabling result log (result will vary)
--disable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `int1_unique` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_unique` FOR UPDATE;
# Even if SELECT FOR UPDATE failed, we can continue - we just don't necessarily move the row out of locked portion of index.
# We (may) have locked some records (if any were found)
# Set an int1_unique to a value outside of this range.
# First pick a pk to use several times later in the transaction.
SELECT @pk:=`pk` FROM t1 WHERE `int1_unique` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_unique` LIMIT 1;
--echo *** Enabling result log
--enable_result_log
# We should mark row as consistent if the row-sum is 0.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD, ER_DUP_ENTRY
UPDATE t1 SET `int1_unique` = `int1_unique` + 50 + CONNECTION_ID(),
`int2_unique` = `int2_unique` - 50 - CONNECTION_ID(),
`id` = 11,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
###################
# Transaction 3
###################
# Not doing this for unique index (too tricky to avoid DUP_ENTRY...)
--echo
--echo *** Move record into locked portion of index:
--echo
START TRANSACTION;
--echo *** Disabling result log (result will vary)
--disable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` FOR UPDATE;
# If the above statement resulted in deadlock we can still continue - the test will just try to do UPDATEs without explicitly locking first.
# We (may) have locked some records (if any were found)
# Move an int1_key value from just above the locked range down into it.
# Pick a pk to use later in the transaction. Select one whose int1_key is just above the locked range.
SELECT @pk:=`pk` FROM t1 WHERE `int1_key` > 1030 ORDER BY `int1_key`, `pk` LIMIT 1;
--echo *** Enabling result log
--enable_result_log
# We should mark row as consistent if the row-sum is 0.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int1_key` = `int1_key` - 50,
`int2_key` = `int2_key` + 50,
`id` = 12,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
###################
# Transaction 4
###################
# Not doing this for unique index (too tricky to avoid DUP_ENTRY...)
--echo
--echo *** Move record inside locked portion of index (move it but stay inside the locked range):
--echo
START TRANSACTION;
--echo *** Disabling result log (result will vary)
--disable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `int1_key` BETWEEN 981 + (CONNECTION_ID() MOD 15) AND 1030 ORDER BY `int1_key`, `pk` FOR UPDATE;
# If the above statement resulted in deadlock we can still continue - the test will just try to do UPDATEs without explicitly locking first.
# We (may) have locked some records (if any were found)
# Shift an int1_key value within the locked range; it stays inside after the update.
# Pick a pk to use later in the transaction. Select one from the interior of the locked range.
SELECT @pk:=`pk` FROM t1 WHERE `int1_key` BETWEEN 981 + 10 + (CONNECTION_ID() MOD 15) AND 1019 ORDER BY `int1_key`, `pk` LIMIT 1;
--echo *** Enabling result log
--enable_result_log
# We should mark row as consistent if the row-sum is 0.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int1_key` = `int1_key` - 10,
`int2_key` = `int2_key` + 10,
`id` = 13,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique` = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
###################
# Transaction 5
###################
--echo
--echo *** Move record outside existing index boundary (max):
--echo
START TRANSACTION;
--echo *** Disabling result log (results will vary)
--disable_result_log
# Get the max value of `int2_key`.
# Pick a random pk value.
# The pk identifies a row that we want to update to move its int2_key value above the current MAX.
SELECT @max:=MAX(`int2_key`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
# Get the current value of `int2_key` of the row we are going to update.
# We need this to be able to calculate values for maintaining table consistency.
# Also, we should mark row as consistent if the row-sum is 0, so calculate the sum of the data ints.
# Hence, we need to lock the row to avoid concurrent modifications.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
# Do not continue if the above FOR UPDATE locking fails - we may end up making the table/row inconsistent.
--source suite/engines/rr_trx/include/check_for_error_rollback_skip.inc
SELECT @old:=`int2_key`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
--echo *** Enabling result log
--enable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int2_key` = @max + 1,
`int2` = `int2` - (@max + 1 - @old),
`id` = 14,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
## Do the same with a UNIQUE index
###################
# Transaction 6
###################
--echo
--echo *** Move record outside existing UNIQUE index boundary (max):
--echo
START TRANSACTION;
--echo *** Disabling result log (results will vary)
--disable_result_log
# Get the max value of `int2_unique`.
# Pick a random pk value.
# The pk identifies a row that we want to update to move its int2_unique value above the current MAX.
SELECT @max:=MAX(`int2_unique`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
# Get the current value of `int2_unique` of the row we are going to update.
# We need this to be able to calculate values for maintaining table consistency.
# Also, we should mark row as consistent if the row-sum is 0, so calculate the sum of the data ints.
# We need to lock the row to avoid concurrent "silent" modifications.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
# Do not continue if the above FOR UPDATE locking fails - we may end up making the table/row inconsistent.
--source suite/engines/rr_trx/include/check_for_error_rollback_skip.inc
SELECT @old:=`int2_unique`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
--echo *** Enabling result log
--enable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD, ER_DUP_ENTRY
UPDATE t1 SET `int2_unique` = @max + 1,
`int2` = `int2` - (@max + 1 - @old),
`id` = 15,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
--source suite/engines/rr_trx/include/check_for_error_rollback_skip.inc
COMMIT;
###################
# Transaction 7
###################
--echo
--echo *** Move record outside existing index boundary (min):
--echo
START TRANSACTION;
--echo *** Disabling result log (results will vary)
--disable_result_log
# Get the min value of `int1_key`.
# Pick a random pk value.
# The pk identifies a row that we want to update to move its int1_key value below the current MIN.
SELECT @min:=MIN(`int1_key`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
# Get the current value of `int1_key` of the row we are going to update.
# We need this to be able to calculate values for maintaining table consistency.
# Also, we should mark row as consistent if the row-sum is 0, so calculate the sum of the data ints.
# Hence, we need to lock the row to avoid concurrent modifications.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
# Do not continue if the above FOR UPDATE locking fails - we may end up making the table/row inconsistent.
--source suite/engines/rr_trx/include/check_for_error_rollback_skip.inc
SELECT @old:=`int1_key`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
--echo *** Enabling result log
--enable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int1_key` = @min - 1,
`int1` = `int1` - (@min - 1 - @old),
`id` = 16,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
## Do the same with a UNIQUE index
###################
# Transaction 8
###################
--echo
--echo *** Move record outside existing UNIQUE index boundary (min):
--echo
START TRANSACTION;
--echo *** Disabling result log (results will vary)
--disable_result_log
# Get the min value of `int1_unique`.
# Pick a random pk value.
# The pk identifies a row that we want to update to move its int1_unique value below the current MIN.
SELECT @min:=MIN(`int1_unique`), @pk:=FLOOR(1 + RAND() * (MAX(`pk`) - 1)) FROM t1;
# Get the current value of `int1_unique` of the row we are going to update.
# We need this to be able to calculate values for maintaining table consistency.
# Also, we should mark row as consistent if the row-sum is 0, so calculate the sum of the data ints.
# Hence, we need to lock the row to avoid concurrent modifications.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
SELECT * FROM t1 WHERE `pk` = @pk FOR UPDATE;
# Do not continue if the above FOR UPDATE locking fails - we may end up making the table/row inconsistent.
--source suite/engines/rr_trx/include/check_for_error_rollback_skip.inc
SELECT @old:=`int1_unique`, (@sum:=`int1` + `int2` + `int1_key` + `int2_key` + `int1_unique` + `int2_unique`) FROM t1 WHERE `pk` = @pk;
--echo *** Enabling result log
--enable_result_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD, ER_DUP_ENTRY
UPDATE t1 SET `int1_unique` = @min - 1,
`int1` = `int1` - (@min - 1 - @old),
`id` = 17,
`connection_id` = CONNECTION_ID(),
`is_consistent` = IF(@sum = 0, 1, 0),
`thread_id` = 0
WHERE `pk` = @pk;
COMMIT;
###################
# Transaction 9
###################
--echo
--echo *** Move record forward in index (add some number):
--echo
START TRANSACTION;
# Updating a "random" row.
# Subtract the same number from another field to maintain consistency.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int2_key` = `int2_key` + 16,
`int2` = `int2` - 16,
`id` = 18,
`connection_id` = CONNECTION_ID(),
`thread_id` = 0
WHERE `pk` = CONNECTION_ID() MOD 1000;
## Skip the same with a UNIQUE index (we need to update to > MAX or find some missing value in the middle). See MAX update in previous transactions.
--echo
--echo *** Move record backward in index (subtract some number):
--echo
# Updating a "random" row.
# Add the same number to another field to maintain consistency.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int1_key` = `int1_key` - 16,
`int1` = `int1` + 16,
`id` = 18,
`connection_id` = CONNECTION_ID(),
`thread_id` = 0
WHERE `pk` = (CONNECTION_ID() + 16) MOD 1000;
COMMIT;

View File

@@ -0,0 +1,41 @@
################################################################################
#
# UPDATE using multiple statements. Subtract a value (4) from one field, then
# add the same value to another field in the same row.
#
# Mind any tx errors, ROLLBACK if needed to "equalize" different engines.
#
################################################################################
SET autocommit = 0;
START TRANSACTION;
eval SET @conn_id = CONNECTION_ID(), @thread_id = 0;
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int1` = `int1` - 4,
`id` = 4,
`is_consistent` = 0,
`connection_id` = @conn_id,
`thread_id` = @thread_id
WHERE `pk` = 4;
--source suite/engines/rr_trx/include/check_for_error_rollback.inc
--echo *** UPDATEing row with pk = 4 and `int1_key` +=4 if above statement succeeded (query log disabled)...
if (!$error)
{
# This is conditional, so disable query log to avoid irrelevant diffs.
--disable_query_log
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_CHECKREAD
UPDATE t1 SET `int1_key` = `int1_key` + 4,
`is_consistent` = 1,
`connection_id` = @conn_id,
`thread_id` = @thread_id
WHERE `pk` = 4;
--source suite/engines/rr_trx/include/check_for_error_rollback.inc
--enable_query_log
}
COMMIT;