Maven dependencies:
<dependency>
    <groupId>com.baomidou</groupId>
    <artifactId>mybatis-plus-spring-boot3-starter</artifactId>
    <version>${mybatis-plus.version}</version>
</dependency>
<dependency>
    <groupId>com.baomidou</groupId>
    <artifactId>dynamic-datasource-spring-boot3-starter</artifactId>
    <version>${dynamic-datasource.version}</version>
</dependency>
<dependency>
    <groupId>org.apache.shardingsphere</groupId>
    <artifactId>shardingsphere-jdbc</artifactId>
    <version>${shardingsphere.version}</version>
</dependency>
<dependency>
    <groupId>org.projectlombok</groupId>
    <artifactId>lombok</artifactId>
    <optional>true</optional>
</dependency>
Dependency versions (this part isn't critical 🙄):
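The concrete version numbers are omitted here. As a sketch, they would normally be pinned as Maven properties; the values below are placeholders rather than a tested combination, so substitute whatever versions your project actually uses:

<properties>
    <!-- placeholder values only: pin these to the versions you actually use -->
    <mybatis-plus.version>3.5.x</mybatis-plus.version>
    <dynamic-datasource.version>4.x.x</dynamic-datasource.version>
    <shardingsphere.version>5.x.x</shardingsphere.version>
</properties>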
Spring application.yml configuration:
spring:
  datasource:
    dynamic:
      datasource:
        master:
          url: jdbc:postgresql://{master_ip}:{port}/manage
          username: {db_username}
          password: {db_password}
          driver-class-name: org.postgresql.Driver
        slave:
          url: jdbc:postgresql://{slave_ip}:{port}/manage
          username: {db_username}
          password: {db_password}
          driver-class-name: org.postgresql.Driver
        shardingmaster:
          url: jdbc:shardingsphere:classpath:sharding-config.yml
          driver-class-name: org.apache.shardingsphere.driver.ShardingSphereDriver
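Note that dynamic-datasource uses the data source named master as the primary (default) one unless told otherwise. If you would rather default to the ShardingSphere data source instead of switching with @DS, a minimal sketch using the standard spring.datasource.dynamic.primary property would be:

spring:
  datasource:
    dynamic:
      # make the ShardingSphere data source the default (assumption: you want it as the primary)
      primary: shardingmaster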
sharding-config.yml configuration:
databaseName: shardingmaster
mode:
  type: Standalone
  repository:
    type: JDBC
dataSources:
  shardingmaster:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    jdbcUrl: jdbc:postgresql://{sharding_master_ip}:{port}/manage
    driverClassName: org.postgresql.Driver
    username: {db_username}
    password: {db_password}
  shardingslave0:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    jdbcUrl: jdbc:postgresql://{sharding_slave_0}:{port}/manage
    driverClassName: org.postgresql.Driver
    username: {db_username}
    password: {db_password}
  shardingslave1:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    jdbcUrl: jdbc:postgresql://{sharding_slave_1}:{port}/manage
    driverClassName: org.postgresql.Driver
    username: {db_username}
    password: {db_password}
rules:
- !SHARDING
  tables:
    t_user:
      actualDataNodes: shardingmaster.t_user_$->{0..4}
      tableStrategy:
        standard:
          shardingColumn: user_id
          shardingAlgorithmName: manage_inline
      keyGenerateStrategy:
        column: user_id
        keyGeneratorName: manage_snowflake
  shardingAlgorithms:
    manage_inline:
      type: INLINE
      props:
        algorithm-expression: t_user_$->{user_id % 5}
        allowRangeQueryWithInlineSharding: true
  keyGenerators:
    manage_snowflake:
      type: SNOWFLAKE
- !READWRITE_SPLITTING
  dataSources:
    readwrite_ds:
      writeDataSourceName: shardingmaster
      readDataSourceNames:
        - shardingslave0
        - shardingslave1
      transactionalReadQueryStrategy: PRIMARY
      loadBalancerName: manage_load_balance_algorithm
  loadBalancers:
    manage_load_balance_algorithm:
      type: ROUND_ROBIN
props:
  sql-show: true
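To make the INLINE algorithm concrete: the expression t_user_$->{user_id % 5} routes each row to the physical table whose suffix equals user_id modulo 5. A tiny stand-alone sketch (the sample ID is the snowflake value that appears in the insert log further below):

public class RoutingDemo {
    public static void main(String[] args) {
        // the INLINE expression t_user_$->{user_id % 5} picks the table suffix from user_id modulo 5
        long userId = 1012384350060150784L; // snowflake ID taken from the insert log below
        System.out.println("routes to t_user_" + (userId % 5)); // prints: routes to t_user_4
    }
}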
Base entity class:
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableLogic;
import com.baomidou.mybatisplus.annotation.Version;
import java.time.LocalDateTime;
import lombok.Data;

@Data
public class BaseEntity {
    // IdType.AUTO keeps MyBatis-Plus from generating the ID itself; ShardingSphere's
    // SNOWFLAKE key generator fills in user_id (visible in the insert log further below)
    @TableId(type = IdType.AUTO)
    private Long userId;
    // logical-delete flag: "deletes" become UPDATE ... SET deleted = 1
    @TableLogic
    private int deleted;
    // optimistic-lock version column
    @Version
    private int version;
    private LocalDateTime createTime;
    private LocalDateTime updateTime;
    private String createBy;
    private String updateBy;
}
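The @Version field only takes effect if the MyBatis-Plus optimistic-lock interceptor is registered. That configuration class isn't shown in the setup above; a minimal sketch using the standard MyBatis-Plus API would be:

import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
import com.baomidou.mybatisplus.extension.plugins.inner.OptimisticLockerInnerInterceptor;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class MybatisPlusConfig {
    @Bean
    public MybatisPlusInterceptor mybatisPlusInterceptor() {
        MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
        // checks and increments the @Version column on updates (see the update log below)
        interceptor.addInnerInterceptor(new OptimisticLockerInnerInterceptor());
        return interceptor;
    }
}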
Entity mapping class:
import com.baomidou.mybatisplus.annotation.TableName;
import lombok.Data;
import lombok.EqualsAndHashCode;

@EqualsAndHashCode(callSuper = true)
@Data
@TableName("t_user")
public class User extends BaseEntity {
    private String username;
    private String password;
}
Mapper interface:
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import org.apache.ibatis.annotations.Mapper;

@Mapper
public interface UserMapper extends BaseMapper<User> {
}
Service class:
import com.baomidou.dynamic.datasource.annotation.DS;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import org.springframework.stereotype.Service;

// @DS routes this service to the "shardingmaster" entry of the dynamic datasource config,
// i.e. through the ShardingSphere JDBC driver
@DS("shardingmaster")
@Service
public class UserGatewayImpl extends ServiceImpl<UserMapper, User> implements UserGateway {
}
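The UserGateway interface itself isn't shown here; since UserGatewayImpl extends ServiceImpl and the tests below call saveBatch, list, and removeBatchByIds, it presumably just extends MyBatis-Plus's IService. A sketch under that assumption:

import com.baomidou.mybatisplus.extension.service.IService;

public interface UserGateway extends IService<User> {
}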
P.S. Don't forget to create the corresponding tables in the database! 😉
CREATE TABLE "public"."t_user_0" (
  "user_id" int8 NOT NULL,
  "username" varchar(255) COLLATE "pg_catalog"."default",
  "password" varchar(255) COLLATE "pg_catalog"."default",
  "version" int4,
  "deleted" int2,
  "create_time" timestamp,
  "update_time" timestamp,
  "create_by" varchar(255) COLLATE "pg_catalog"."default",
  "update_by" varchar(255) COLLATE "pg_catalog"."default"
);
CREATE TABLE "public"."t_user_1" (
  "user_id" int8 NOT NULL,
  "username" varchar(255) COLLATE "pg_catalog"."default",
  "password" varchar(255) COLLATE "pg_catalog"."default",
  "version" int4,
  "deleted" int2,
  "create_time" timestamp,
  "update_time" timestamp,
  "create_by" varchar(255) COLLATE "pg_catalog"."default",
  "update_by" varchar(255) COLLATE "pg_catalog"."default"
);
CREATE TABLE "public"."t_user_2" (
  "user_id" int8 NOT NULL,
  "username" varchar(255) COLLATE "pg_catalog"."default",
  "password" varchar(255) COLLATE "pg_catalog"."default",
  "version" int4,
  "deleted" int2,
  "create_time" timestamp,
  "update_time" timestamp,
  "create_by" varchar(255) COLLATE "pg_catalog"."default",
  "update_by" varchar(255) COLLATE "pg_catalog"."default"
);
CREATE TABLE "public"."t_user_3" (
  "user_id" int8 NOT NULL,
  "username" varchar(255) COLLATE "pg_catalog"."default",
  "password" varchar(255) COLLATE "pg_catalog"."default",
  "version" int4,
  "deleted" int2,
  "create_time" timestamp,
  "update_time" timestamp,
  "create_by" varchar(255) COLLATE "pg_catalog"."default",
  "update_by" varchar(255) COLLATE "pg_catalog"."default"
);
CREATE TABLE "public"."t_user_4" (
  "user_id" int8 NOT NULL,
  "username" varchar(255) COLLATE "pg_catalog"."default",
  "password" varchar(255) COLLATE "pg_catalog"."default",
  "version" int4,
  "deleted" int2,
  "create_time" timestamp,
  "update_time" timestamp,
  "create_by" varchar(255) COLLATE "pg_catalog"."default",
  "update_by" varchar(255) COLLATE "pg_catalog"."default"
);
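Since the five shard tables differ only in their suffix, you could also generate them in one pass, for instance with a PostgreSQL anonymous DO block (a sketch equivalent to the statements above):

DO $$
BEGIN
  FOR i IN 0..4 LOOP
    EXECUTE format('
      CREATE TABLE IF NOT EXISTS "public"."t_user_%s" (
        "user_id" int8 NOT NULL,
        "username" varchar(255),
        "password" varchar(255),
        "version" int4,
        "deleted" int2,
        "create_time" timestamp,
        "update_time" timestamp,
        "create_by" varchar(255),
        "update_by" varchar(255)
      )', i);
  END LOOP;
END $$;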
That completes the basic scaffolding; now let's test inserting data:
@Test
void shardingCreate() {
    List<User> users = Lists.newArrayList();
    for (int i = 0; i < 100; i++) {
        User user = new User();
        user.setUsername("user" + i);
        user.setPassword("password" + i);
        users.add(user);
    }
    boolean b = userGateway.saveBatch(users);
    System.out.println(b);
}
Because we set sql-show: true in sharding-config.yml earlier (this property is handy for debugging via printed SQL), the console shows the output below, which confirms the rows were inserted successfully and automatically routed to the correct shard tables by the sharding rule:
ShardingSphere-SQL : Logic SQL: INSERT INTO t_user ( username, password, deleted, version ) VALUES ( ?, ?, ?, ? )
ShardingSphere-SQL : Actual SQL: shardingmaster ::: INSERT INTO t_user_4 ( username, password, deleted, version, user_id ) VALUES (?, ?, ?, ?, ?) ::: [user0, password0, 0, 0, 1012384350060150784]
==> Parameters: user1(String), password1(String), 0(Integer), 0(Integer)
What's next? 😕
Next, of course, we read back the data we just inserted 😬
@Test
void shardingQuery() {
    List<User> users = userGateway.list();
    System.out.println(users);
}
Log output:
ShardingSphere-SQL : Logic SQL: SELECT user_id,username,password,deleted,version,create_time,update_time,create_by,update_by FROM t_user WHERE deleted=0
ShardingSphere-SQL : Actual SQL: shardingmaster ::: SELECT user_id,username,password,deleted,version,create_time,update_time,create_by,update_by FROM t_user_0 WHERE deleted=0 UNION ALL SELECT user_id,username,password,deleted,version,create_time,update_time,create_by,update_by FROM t_user_1 WHERE deleted=0 UNION ALL SELECT user_id,username,password,deleted,version,create_time,update_time,create_by,update_by FROM t_user_2 WHERE deleted=0 UNION ALL SELECT user_id,username,password,deleted,version,create_time,update_time,create_by,update_by FROM t_user_3 WHERE deleted=0 UNION ALL SELECT user_id,username,password,deleted,version,create_time,update_time,create_by,update_by FROM t_user_4 WHERE deleted=0
From this log it is easy to see that ShardingSphere uses UNION ALL to merge the result sets from all of the shard tables.
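Conversely, when the shard key appears in the query condition, ShardingSphere can route to a single physical table instead of scanning all five. A hypothetical test along these lines (the ID is the one used in the update example below) should only hit one t_user_N table:

@Test
void shardingQueryByShardKey() {
    // com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper
    // with the shard key (user_id) in the condition, only one physical table is routed to
    LambdaQueryWrapper<User> queryWrapper = new LambdaQueryWrapper<>();
    queryWrapper.eq(User::getUserId, 1012384350592827392L);
    User user = userGateway.getOne(queryWrapper);
    System.out.println(user);
}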
Updating data:
@Test
void shardingUpdate() {
    LambdaUpdateWrapper<User> updateWrapper = new LambdaUpdateWrapper<>();
    updateWrapper.eq(User::getUserId, 1012384350592827392L);
    User user = new User();
    // this record's original username was "user7"
    user.setUsername("update7");
    userGateway.update(user, updateWrapper);
}
Log output (note the version check and increment added by the optimistic-lock interceptor):
ShardingSphere-SQL : Logic SQL: UPDATE t_user SET username=?, version=? WHERE deleted=0 AND (user_id = ? AND version = ?)
ShardingSphere-SQL : Actual SQL: shardingmaster ::: UPDATE t_user_2 SET username=?, version=? WHERE deleted=0 AND (user_id = ? AND version = ?) ::: [update7, 1, 1012384350592827392, 0]
Deleting data:
@Test
void shardingRemoveAll() {
    List<Long> ids = userGateway.list().stream().map(User::getUserId).toList();
    userGateway.removeBatchByIds(ids);
}
Log output:
ShardingSphere-SQL : Logic SQL: UPDATE t_user SET deleted=1 WHERE user_id IN ( ? , .... ) AND deleted=0
ShardingSphere-SQL : Actual SQL: shardingmaster ::: UPDATE t_user_0 SET deleted=1 WHERE user_id IN ( ? , .... ) AND deleted=0 ::: [1012384350601216000,.....]
ShardingSphere-SQL : Actual SQL: shardingmaster ::: UPDATE t_user_1 SET deleted=1 WHERE user_id IN ( ? , .... ) AND deleted=0 ::: [1012384350601216000,....]
ShardingSphere-SQL : Actual SQL: shardingmaster ::: UPDATE t_user_2 SET deleted=1 WHERE user_id IN ( ? ,.... ) AND deleted=0 ::: [1012384350601216000, ....]
ShardingSphere-SQL : Actual SQL: shardingmaster ::: UPDATE t_user_3 SET deleted=1 WHERE user_id IN ( ? ,.... ) AND deleted=0 ::: [1012384350601216000,....]
ShardingSphere-SQL : Actual SQL: shardingmaster ::: UPDATE t_user_4 SET deleted=1 WHERE user_id IN
Some readers may wonder: why does the delete show up as an UPDATE statement?
That is because logical deletion is enabled through MyBatis-Plus (the @TableLogic field on BaseEntity), so deletes are rewritten as updates that set deleted = 1; the related settings are sketched below.
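For reference, the defaults already treat 0 as "not deleted" and 1 as "deleted", but you can make that explicit in application.yml with the standard MyBatis-Plus properties (a sketch):

mybatis-plus:
  global-config:
    db-config:
      # these match the defaults; shown here only to make the logical-delete behavior explicit
      logic-delete-field: deleted
      logic-delete-value: 1
      logic-not-delete-value: 0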
With that, the integration of multiple data sources with database/table sharding, plus the basic CRUD operations, has all been tested.
Side note:
If you want to add and remove sharded data sources at runtime, refer to the ShardingSphere Java API documentation (Java API :: ShardingSphere, apache.org) together with dynamic-datasource.
I'll describe how to implement runtime addition and removal of sharded data sources in a separate post when I get the time.
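For a rough idea of what that could look like, here is a sketch only, under the assumption that you build the ShardingSphere data source from a YAML file and register it with dynamic-datasource's DynamicRoutingDataSource; the class and method names below are illustrative helpers, so verify the APIs against the versions you actually use:

import com.baomidou.dynamic.datasource.DynamicRoutingDataSource;
import java.io.File;
import java.io.IOException;
import java.sql.SQLException;
import javax.sql.DataSource;
import org.apache.shardingsphere.driver.api.yaml.YamlShardingSphereDataSourceFactory;

public class RuntimeShardingDataSourceRegistrar {

    // hypothetical helper: builds a ShardingSphere data source from a YAML config file
    // and registers it under the given name so @DS("name") can route to it
    public void register(DynamicRoutingDataSource routingDataSource, String name, File shardingYaml)
            throws SQLException, IOException {
        DataSource shardingDataSource = YamlShardingSphereDataSourceFactory.createDataSource(shardingYaml);
        routingDataSource.addDataSource(name, shardingDataSource);
    }

    // hypothetical helper: removes a data source that is no longer needed
    public void unregister(DynamicRoutingDataSource routingDataSource, String name) {
        routingDataSource.removeDataSource(name);
    }
}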