SpringBoot + MyBatisPlus + ShardingJDBC: Sharding and Read-Write Splitting Integration

This article walks through a local-database simulation of sharding (splitting data across databases and tables) combined with read-write splitting. Member data is sharded by ID using a modulo rule: 2 master databases, each paired with one read replica, and 100 tables in total, distributed as follows:

Master/slave database     Tables
user_1 (master)           t_user_00 ~ t_user_49
user_slave_1 (slave)      t_user_00 ~ t_user_49
user_2 (master)           t_user_50 ~ t_user_99
user_slave_2 (slave)      t_user_50 ~ t_user_99
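
Spelled out as a quick illustration (standalone code, not part of the project), the routing rule the table above implies is:

// Illustration only: how an id is expected to map to a database and table under the scheme above.
public class RoutingExample {

    public static void main(String[] args) {
        for (int id : new int[]{8902, 8952}) {
            int index = id % 100;                               // 8902 -> 2, 8952 -> 52
            String database = index > 49 ? "user_2" : "user_1"; // tables 50-99 live in user_2
            String table = String.format("t_user_%02d", index); // zero-padded table suffix
            System.out.println(id + " -> " + database + "." + table);
        }
        // Prints: 8902 -> user_1.t_user_02  and  8952 -> user_2.t_user_52
    }
}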

Only the core code is shown; the Controller and Service layers used for testing are trivial and are omitted.

Dependencies

<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter</artifactId>
    <version>2.1.3.RELEASE</version>
</dependency>

<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-web</artifactId>
    <version>2.1.3.RELEASE</version>
</dependency>

<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-aop</artifactId>
    <version>2.1.3.RELEASE</version>
</dependency>
<dependency>
    <groupId>io.shardingsphere</groupId>
    <artifactId>sharding-jdbc</artifactId>
    <version>3.0.0.M1</version>
</dependency>

<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>druid</artifactId>
    <version>1.1.12</version>
</dependency>

<dependency>
    <groupId>com.baomidou</groupId>
    <artifactId>mybatis-plus-boot-starter</artifactId>
    <version>3.1.0</version>
</dependency>

<dependency>
    <groupId>mysql</groupId>
    <artifactId>mysql-connector-java</artifactId>
    <version>5.1.41</version>
</dependency>

Data Preparation

use user_1;
CREATE TABLE `t_user_00` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `name` varchar(45) DEFAULT NULL,
  `age` int(11) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE TABLE `t_user_01` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `name` varchar(45) DEFAULT NULL,
  `age` int(11) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE TABLE `t_user_02` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `name` varchar(45) DEFAULT NULL,
  `age` int(11) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

use user_2;
CREATE TABLE `t_user_50` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `name` varchar(45) DEFAULT NULL,
  `age` int(11) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE TABLE `t_user_51` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `name` varchar(45) DEFAULT NULL,
  `age` int(11) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE TABLE `t_user_52` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `name` varchar(45) DEFAULT NULL,
  `age` int(11) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

use user_slave_1;
CREATE TABLE `t_user_00` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `name` varchar(45) DEFAULT NULL,
  `age` int(11) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE TABLE `t_user_01` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `name` varchar(45) DEFAULT NULL,
  `age` int(11) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE TABLE `t_user_02` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `name` varchar(45) DEFAULT NULL,
  `age` int(11) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

use user_slave_2;
CREATE TABLE `t_user_50` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `name` varchar(45) DEFAULT NULL,
  `age` int(11) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE TABLE `t_user_51` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `name` varchar(45) DEFAULT NULL,
  `age` int(11) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE TABLE `t_user_52` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `name` varchar(45) DEFAULT NULL,
  `age` int(11) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
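
Only the first few tables of each range are shown above; the rest follow the same definition. A helper procedure along these lines (a sketch assuming MySQL; the name create_user_tables is made up for illustration) can generate the whole range when run inside each database with the matching bounds:

-- Sketch: create t_user_<start_idx> .. t_user_<end_idx> in the current database.
DELIMITER $$
CREATE PROCEDURE create_user_tables(IN start_idx INT, IN end_idx INT)
BEGIN
  DECLARE i INT DEFAULT start_idx;
  WHILE i <= end_idx DO
    SET @ddl = CONCAT(
      'CREATE TABLE IF NOT EXISTS `t_user_', LPAD(i, 2, '0'), '` (',
      '  `id` int(11) NOT NULL AUTO_INCREMENT,',
      '  `name` varchar(45) DEFAULT NULL,',
      '  `age` int(11) DEFAULT NULL,',
      '  PRIMARY KEY (`id`)',
      ') ENGINE=InnoDB DEFAULT CHARSET=utf8');
    PREPARE stmt FROM @ddl;
    EXECUTE stmt;
    DEALLOCATE PREPARE stmt;
    SET i = i + 1;
  END WHILE;
END$$
DELIMITER ;

-- In user_1 and user_slave_1 (t_user_00 ~ t_user_49):
CALL create_user_tables(0, 49);
-- In user_2 and user_slave_2 (t_user_50 ~ t_user_99):
-- CALL create_user_tables(50, 99);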

Implementation

Data Source Configuration

spring.datasource.type selects Druid as the pool implementation. The custom datasource.* tree below is not consumed by Spring Boot auto-configuration; each block is bound to its own DataSource bean by the configuration class in the next section.

server:
  port: 23333
spring:
  application:
    name: pt-framework-demo
  datasource:
    type: com.alibaba.druid.pool.DruidDataSource

datasource:
  default:
    driver-class-name: com.mysql.jdbc.Driver
    url: jdbc:mysql://127.0.0.1:3307/test?useUnicode=true&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull&rewriteBatchedStatements=true&autoReconnect=true&failOverReadOnly=false
    username: root
    password: root
    test-on-borrow: false
    test-while-idle: true
    time-between-eviction-runs-millis: 18800
    filters: mergeStat,wall,slf4j
    connectionProperties: druid.stat.slowSqlMillis=2000
    validationQuery: SELECT 1
    poolPreparedStatements: true
  user:
    master:
      user1:
        driver-class-name: com.mysql.jdbc.Driver
        url: jdbc:mysql://127.0.0.1:3307/user_1?useUnicode=true&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull&rewriteBatchedStatements=true&autoReconnect=true&failOverReadOnly=false
        username: root
        password: root
        test-on-borrow: false
        test-while-idle: true
        time-between-eviction-runs-millis: 18800
        filters: mergeStat,wall,slf4j
        connectionProperties: druid.stat.slowSqlMillis=2000
        validationQuery: SELECT 1
        poolPreparedStatements: true
      user2:
        driver-class-name: com.mysql.jdbc.Driver
        url: jdbc:mysql://127.0.0.1:3307/user_2?useUnicode=true&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull&rewriteBatchedStatements=true&autoReconnect=true&failOverReadOnly=false
        username: root
        password: root
        test-on-borrow: false
        test-while-idle: true
        time-between-eviction-runs-millis: 18800
        filters: mergeStat,wall,slf4j
        connectionProperties: druid.stat.slowSqlMillis=2000
        validationQuery: SELECT 1
        poolPreparedStatements: true
    slave:
      user1:
        driver-class-name: com.mysql.jdbc.Driver
        url: jdbc:mysql://127.0.0.1:3307/user_slave_1?useUnicode=true&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull&rewriteBatchedStatements=true&autoReconnect=true&failOverReadOnly=false
        username: root
        password: root
        test-on-borrow: false
        test-while-idle: true
        time-between-eviction-runs-millis: 18800
        filters: mergeStat,wall,slf4j
        connectionProperties: druid.stat.slowSqlMillis=2000
        validationQuery: SELECT 1
        poolPreparedStatements: true
      user2:
        driver-class-name: com.mysql.jdbc.Driver
        url: jdbc:mysql://127.0.0.1:3307/user_slave_2?useUnicode=true&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull&rewriteBatchedStatements=true&autoReconnect=true&failOverReadOnly=false
        username: root
        password: root
        test-on-borrow: false
        test-while-idle: true
        time-between-eviction-runs-millis: 18800
        filters: mergeStat,wall,slf4j
        connectionProperties: druid.stat.slowSqlMillis=2000
        validationQuery: SELECT 1
        poolPreparedStatements: true

Master-Slave / Read-Write Splitting Configuration

Each shard's master and its read replica are first wrapped into a master-slave (read-write splitting) data source; the two resulting data sources are then combined into a single sharding data source, which backs the MyBatis SqlSessionFactory and the transaction manager.

/**
 * Created by Captain on 01/03/2019.
 */
@Configuration
@MapperScan(basePackages = {"com.xxxx.framework.usermapper"}, sqlSessionFactoryRef = "userShardingSqlSessionFactory")
public class UserShardingDBConfiguration {

    @Value("${spring.datasource.type}")
    private Class<? extends DataSource> dataSourceType;

    private static final String USER_1_MASTER = "dsUser1Master";
    private static final String USER_1_SLAVE = "dsUser1Slave";
    private static final String USER_2_MASTER = "dsUser2Master";
    private static final String USER_2_SLAVE = "dsUser2Slave";
    private static final String USER_SHARDING_1 = "dsMasterSlave1";
    private static final String USER_SHARDING_2 = "dsMasterSlave2";

    private static final String USER_SHARDING_DATA_SOURCE = "userSharding";

    @Bean(USER_1_MASTER)
    @ConfigurationProperties(prefix = "datasource.user.master.user1")
    public DataSource dsUser1(){
        return DataSourceBuilder.create().type(dataSourceType).build();
    }

    @Bean(USER_2_MASTER)
    @ConfigurationProperties(prefix = "datasource.user.master.user2")
    public DataSource dsUser2(){
        return DataSourceBuilder.create().type(dataSourceType).build();
    }

    @Bean(USER_1_SLAVE)
    @ConfigurationProperties(prefix = "datasource.user.slave.user1")
    public DataSource dsUserSlave1(){
        return DataSourceBuilder.create().type(dataSourceType).build();
    }

    /**
     * Read replica (slave) of user_2.
     * @return
     */
    @Bean(USER_2_SLAVE)
    @ConfigurationProperties(prefix = "datasource.user.slave.user2")
    public DataSource dsUserSlave2(){
        return DataSourceBuilder.create().type(dataSourceType).build();
    }

    @Bean(USER_SHARDING_1)
    public DataSource masterSlave1(@Qualifier(USER_1_MASTER) DataSource dsUser1,@Qualifier(USER_1_SLAVE) DataSource dsUserSlave1) throws Exception {
        Map<String,DataSource> dataSourceMap = new HashMap<>();
        dataSourceMap.put(USER_1_MASTER, dsUser1);
        dataSourceMap.put(USER_1_SLAVE, dsUserSlave1);
        MasterSlaveRuleConfiguration ruleConfiguration = new MasterSlaveRuleConfiguration("dsUser1", USER_1_MASTER, Lists.newArrayList(USER_1_SLAVE));
        return MasterSlaveDataSourceFactory.createDataSource(dataSourceMap, ruleConfiguration, new ConcurrentHashMap<>());
    }

    @Bean(USER_SHARDING_2)
    public DataSource masterSlave2(@Qualifier(USER_2_MASTER) DataSource dsUser2,@Qualifier(USER_2_SLAVE) DataSource dsUserSlave2) throws Exception {
        Map<String,DataSource> dataSourceMap = new HashMap<>();
        dataSourceMap.put(USER_2_MASTER, dsUser2);
        dataSourceMap.put(USER_2_SLAVE, dsUserSlave2);
        MasterSlaveRuleConfiguration ruleConfiguration = new MasterSlaveRuleConfiguration("dsUser2", USER_2_MASTER, Lists.newArrayList(USER_2_SLAVE));
        return MasterSlaveDataSourceFactory.createDataSource(dataSourceMap, ruleConfiguration, new ConcurrentHashMap<>());
    }

    @Bean(USER_SHARDING_DATA_SOURCE)
    public DataSource dsUser(@Qualifier(USER_SHARDING_1) DataSource dsUser1, @Qualifier(USER_SHARDING_2) DataSource dsUser2) throws Exception {
        // The two master-slave groups are registered as the logical data sources "dsUser1" and
        // "dsUser2" that the database sharding algorithm chooses between.
        Map<String, DataSource> dataSourceMap = new HashMap<>();
        dataSourceMap.put("dsUser1", dsUser1);
        dataSourceMap.put("dsUser2", dsUser2);
        ShardingRuleConfiguration userRule = getUserRule();
        // Tables without a sharding rule fall back to this data source; the name must be a key of dataSourceMap.
        userRule.setDefaultDataSourceName("dsUser1");
        return ShardingDataSourceFactory.createDataSource(dataSourceMap, userRule, new ConcurrentHashMap<>(), new Properties());
    }

    /**
     * Configure the sharding rule.
     * @return
     */
    private ShardingRuleConfiguration getUserRule(){
        ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
        // Shard by the id column: MemberIdShardingSchemeAlgorithm picks the database,
        // MemberIdShardingTableAlgorithm picks the table.
        shardingRuleConfig.setDefaultDatabaseShardingStrategyConfig(new StandardShardingStrategyConfiguration("id", new MemberIdShardingSchemeAlgorithm()));
        shardingRuleConfig.setDefaultTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("id", new MemberIdShardingTableAlgorithm()));
        shardingRuleConfig.getBindingTableGroups().add("t_user");
        return shardingRuleConfig;
    }

    @Bean("userShardingSqlSessionFactory")
    public SqlSessionFactory userSqlSessionFactory(@Qualifier(USER_SHARDING_DATA_SOURCE) DataSource dataSource) throws Exception{
        MybatisSqlSessionFactoryBean sqlSessionFactoryBean = new MybatisSqlSessionFactoryBean();
        sqlSessionFactoryBean.setDataSource(dataSource);
        sqlSessionFactoryBean.setMapperLocations(new PathMatchingResourcePatternResolver().getResources("classpath:usermapper/*.xml"));
        return sqlSessionFactoryBean.getObject();
    }

    @Bean("userTransaction")
    public DataSourceTransactionManager userTransactionManager(@Qualifier(USER_SHARDING_DATA_SOURCE) DataSource dataSource){
        return new DataSourceTransactionManager(dataSource);
    }

}
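
The application entry point is not shown in the original article. Since every DataSource is assembled by hand above, a typical setup (an assumption here, not taken from the source) excludes Spring Boot's single-datasource auto-configuration:

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;

// Sketch of the entry point; the class name is illustrative.
@SpringBootApplication(exclude = DataSourceAutoConfiguration.class)
public class ShardingDemoApplication {

    public static void main(String[] args) {
        SpringApplication.run(ShardingDemoApplication.class, args);
    }
}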

Database Sharding Strategy

/**
 * Database sharding strategy for CoreUser: ids with suffix 00-49 go to data source 1,
 * ids with suffix 50-99 go to data source 2.
 * Created by Captain on 01/03/2019.
 */
public class MemberIdShardingSchemeAlgorithm implements PreciseShardingAlgorithm<Integer> {

    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<Integer> shardingValue) {
        // availableTargetNames holds the logical data source names, e.g. dsUser1 / dsUser2.
        int index = shardingValue.getValue() % 100;
        String suffix = index > 49 ? "2" : "1";
        for (String targetName : availableTargetNames) {
            if (targetName.endsWith(suffix)) {
                return targetName;
            }
        }
        throw new UnsupportedOperationException("No data source found for id " + shardingValue.getValue());
    }

}
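
A quick sanity check of the database routing (a throwaway snippet, e.g. in a scratch main method; it assumes PreciseShardingValue is constructed from the logic table name, the column name, and the value):

// 8902 % 100 = 2  -> should print "dsUser1"; 8952 % 100 = 52 -> should print "dsUser2".
PreciseShardingAlgorithm<Integer> dbAlgorithm = new MemberIdShardingSchemeAlgorithm();
Collection<String> dataSources = Arrays.asList("dsUser1", "dsUser2");
System.out.println(dbAlgorithm.doSharding(dataSources, new PreciseShardingValue<>("t_user", "id", 8902)));
System.out.println(dbAlgorithm.doSharding(dataSources, new PreciseShardingValue<>("t_user", "id", 8952)));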

Table Sharding Strategy

/**
 * Table sharding strategy for member data: shard by id, id % 100 selects one of t_user_00 .. t_user_99.
 * Created by Captain on 04/03/2019.
 */
public class MemberIdShardingTableAlgorithm implements PreciseShardingAlgorithm<Integer> {
    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<Integer> shardingValue) {
        int index = shardingValue.getValue() % 100;
        // Zero-pad the suffix so that 2 becomes t_user_02 and 52 becomes t_user_52.
        return shardingValue.getLogicTableName() + "_" + String.format("%02d", index);
    }
}
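
And the same kind of check for the table routing (same assumptions as the snippet above):

// 8902 -> "t_user_02", 8952 -> "t_user_52".
PreciseShardingAlgorithm<Integer> tableAlgorithm = new MemberIdShardingTableAlgorithm();
Collection<String> tables = Collections.singletonList("t_user"); // not inspected by this algorithm
System.out.println(tableAlgorithm.doSharding(tables, new PreciseShardingValue<>("t_user", "id", 8902)));
System.out.println(tableAlgorithm.doSharding(tables, new PreciseShardingValue<>("t_user", "id", 8952)));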

Entity Class

/**
 * Created by Captain on 01/03/2019.
 */
@TableName("t_user")
public class User {

    /** Sharding key; supplied by the caller (IdType.INPUT) rather than generated by the database. */
    @TableId(type = IdType.INPUT)
    private Integer id;
    private String name;
    private Integer age;

    public Integer getId() {
        return id;
    }

    public void setId(Integer id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public Integer getAge() {
        return age;
    }

    public void setAge(Integer age) {
        this.age = age;
    }
}

Mapper

/**
 * Created by Captain on 04/03/2019.
 */
public interface UserMapper extends BaseMapper<User> {

}
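
The omitted Service layer can be as small as the following sketch (class and method names here are made up, not from the original article); writes go through the transaction manager bound to the sharding data source:

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

// Illustrative service used for testing; method names are hypothetical.
@Service
public class UserService {

    @Autowired
    private UserMapper userMapper;

    // Routed to the master of the shard that owns user.getId().
    @Transactional("userTransaction")
    public void addUser(User user) {
        userMapper.insert(user);
    }

    // Plain reads are routed to the corresponding slave.
    public User getUser(Integer id) {
        return userMapper.selectById(id);
    }
}

A Controller calling these two methods is enough to drive the test cases below.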

Test Expectations

The simulation does not set up real master-slave replication, so rows written to a "master" are not copied to its "slave" automatically. After inserting through the application, write the same row into the corresponding slave by hand, deliberately changing some values; the differences then show whether read-write splitting is actually in effect when querying.

Test case                         Expected result
Insert a row with id = 8902       The row is written into user_1
Insert a row with id = 8952       The row is written into user_2
Query the row with id = 8902      The result comes back from user_slave_1
Query the row with id = 8952      The result comes back from user_slave_2
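
Because there is no real replication, each slave copy has to be written by hand; making it deliberately different from the master row exposes which database a query actually hit. For example (the values are arbitrary):

-- id 8902 lands in user_1.t_user_02 via the application; write a different copy into the slave:
INSERT INTO user_slave_1.t_user_02 (id, name, age) VALUES (8902, 'from-slave-1', 20);

-- id 8952 lands in user_2.t_user_52 via the application:
INSERT INTO user_slave_2.t_user_52 (id, name, age) VALUES (8952, 'from-slave-2', 21);

If querying id 8902 through the application returns from-slave-1, reads are indeed being served by user_slave_1.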

That's all for this article; I hope it helps with your study.
