
Two-Level Caching with Caffeine and Redis, Using Redis Pub/Sub for Consistent Cache Updates

1. What Is a Two-Level Cache

In the project, Caffeine is the first-level cache and Redis is the second level. When querying data, the local Caffeine cache is checked first; on a miss, Redis is queried over the network; if that also misses, the database is queried. The flow is as follows:
(figure: two-level cache lookup flow)

2. A Simple Two-Level Cache Implementation: v1

Directory structure

(screenshot: project directory structure)

2.1 Main Files of the double-cache Module

(screenshot: main files of the double-cache module)

pom file

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.example</groupId>
    <artifactId>double-cache</artifactId>
    <version>1.0-SNAPSHOT</version>

    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.7.2</version>
        <relativePath/>
    </parent>

    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
        </dependency>

        <dependency>
            <groupId>com.github.ben-manes.caffeine</groupId>
            <artifactId>caffeine</artifactId>
            <version>2.9.2</version>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-redis</artifactId>
        </dependency>
    </dependencies>

</project>
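
Besides the pom, the OrderServiceImpl below injects a Caffeine Cache bean and uses a CacheConstant.ORDER key prefix that the article never prints. A minimal sketch of what they might look like (the key prefix and the Caffeine parameters are assumptions, not values from the original):

CacheConstant

public interface CacheConstant {
    // assumed key prefix for order entries
    String ORDER = "order:";
}

CacheConfig

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.util.concurrent.TimeUnit;

@Configuration
public class CacheConfig {
    // the Caffeine Cache bean that OrderServiceImpl below injects as its first-level cache
    @Bean
    public Cache<String, Object> caffeineCache() {
        return Caffeine.newBuilder()
                .initialCapacity(128)                    // assumed initial capacity
                .maximumSize(1024)                       // assumed maximum number of entries
                .expireAfterWrite(30, TimeUnit.SECONDS)  // assumed local TTL, shorter than the 120s Redis TTL
                .build();
    }
}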

2.2 Main Files of the Test Module

(screenshot: main files of the test module)

OrderServiceImpl

@Slf4j
@Service
@RequiredArgsConstructor
public class OrderServiceImpl extends ServiceImpl<OrderMapper, Order> implements OrderService {
    private final OrderMapper orderMapper;
    private final Cache cache;
    private final RedisTemplate redisTemplate;
    @Override
    public Order getOrderById(Long id) {
        String key = CacheConstant.ORDER + id;
        Order order = (Order) cache.get(key,
                k -> {
                    // check Redis first
                    Object obj = redisTemplate.opsForValue().get(k);
                    if (Objects.nonNull(obj)) {
                        log.info("get data from redis");
                        return obj;
                    }
                    // not in Redis, query the DB
                    log.info("get data from database");
                    Order myOrder = orderMapper.selectOne(new LambdaQueryWrapper<Order>()
                            .eq(Order::getId, id));
                    redisTemplate.opsForValue().set(k, myOrder, 120, TimeUnit.SECONDS);
                    return myOrder;
                });
        return order;
    }
    @Override
    public void updateOrder(Order order) {
        log.info("update order data");
        String key = CacheConstant.ORDER + order.getId();
        orderMapper.updateById(order);
        // update Redis
        redisTemplate.opsForValue().set(key, order, 120, TimeUnit.SECONDS);
        // update the local cache
        cache.put(key, order);
    }
    @Override
    public void deleteOrder(Long id) {
        log.info("delete order");
        orderMapper.deleteById(id);
        String key = CacheConstant.ORDER + id;
        redisTemplate.delete(key);
        cache.invalidate(key);
    }
}

application.yml

server:
  port: 8090

spring:
  application:
    name: test-demo
  datasource:
    url: jdbc:mysql://localhost:3306/ktl?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC
    username: root
    password: root
    driver-class-name: com.mysql.cj.jdbc.Driver
  redis:
    host: 192.168.200.131
    port: 6379
    database: 0
    timeout: 10000ms
    lettuce:
      pool:
        max-active: 8
        max-wait: -1ms
        max-idle: 8
        min-idle: 0
    password: root

logging:
  level:
    com.cn.dc: debug
    org.springframework: warn

pom file

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.example</groupId>
    <artifactId>testcache</artifactId>
    <version>1.0-SNAPSHOT</version>

    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.7.2</version>
        <relativePath/>
    </parent>
    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <mybatis-plus.version>3.3.2</mybatis-plus.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.example</groupId>
            <artifactId>double-cache</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <scope>runtime</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-pool2</artifactId>
            <version>2.8.1</version>
        </dependency>
        <dependency>
            <groupId>com.baomidou</groupId>
            <artifactId>mybatis-plus-boot-starter</artifactId>
            <version>${mybatis-plus.version}</version>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>1.18.12</version>
            <scope>provided</scope>
        </dependency>
    </dependencies>
</project>

2.3 Testing

When the get/{id} endpoint is hit, the data fetched from the DB is written into both Redis and Caffeine, so no further database query is needed until the entries expire.
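
The controller itself is not shown in the article; a hypothetical one that matches the URLs used in the tests later (/get/{id}, /update, /del?id=...) might look like this (OrderService and Order are the test module's own classes):

import lombok.RequiredArgsConstructor;
import org.springframework.web.bind.annotation.*;

@RestController
@RequiredArgsConstructor
public class OrderController {
    private final OrderService orderService;

    // e.g. http://127.0.0.1:8090/get/2
    @GetMapping("/get/{id}")
    public Order get(@PathVariable Long id) {
        return orderService.getOrderById(id);
    }

    // e.g. POST http://127.0.0.1:8092/update with a JSON Order body
    @PostMapping("/update")
    public void update(@RequestBody Order order) {
        orderService.updateOrder(order);
    }

    // e.g. http://127.0.0.1:8090/del?id=2
    @GetMapping("/del")
    public void delete(@RequestParam Long id) {
        orderService.deleteOrder(id);
    }
}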

3. Two-Level Cache Implementation: v2

The v1 code is highly invasive in the business logic, so v2 switches to the @Cacheable, @CachePut and @CacheEvict annotations.

3.1 The double-cache Module

(screenshot: double-cache module in v2)
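
The v2 configuration is only shown as a screenshot. For the annotations above to hit Caffeine as the local level (while Redis is still handled by hand inside the service methods), a CaffeineCacheManager has to be registered; here is a minimal sketch under that assumption (the parameter values are also assumptions, and spring-context-support / spring-boot-starter-cache must be on the classpath):

import com.github.benmanes.caffeine.cache.Caffeine;
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.cache.caffeine.CaffeineCacheManager;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.util.concurrent.TimeUnit;

@Configuration
@EnableCaching
public class CacheConfig {
    // @Cacheable/@CachePut/@CacheEvict resolve the "order" cache through this manager
    @Bean
    public CacheManager cacheManager() {
        CaffeineCacheManager cacheManager = new CaffeineCacheManager();
        cacheManager.setCaffeine(Caffeine.newBuilder()
                .initialCapacity(128)
                .maximumSize(1024)
                .expireAfterWrite(30, TimeUnit.SECONDS));
        return cacheManager;
    }
}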

3.2 The Test Module

OrderServiceImpl

@Slf4j
@Service
@RequiredArgsConstructor
public class OrderServiceImpl extends ServiceImpl<OrderMapper, Order> implements OrderService {
    private final OrderMapper orderMapper;
    private final RedisTemplate redisTemplate;

    @Override
    @Cacheable(value = "order",key = "#id")
//@Cacheable(cacheNames = "order",key = "#p0")
    public Order getOrderById(Long id) {
        String key= CacheConstant.ORDER + id;
        // check Redis first
        Object obj = redisTemplate.opsForValue().get(key);
        if (Objects.nonNull(obj)){
            log.info("get data from redis");
            return (Order) obj;
        }
        // not in Redis, query the DB
        log.info("get data from database");
        Order myOrder = orderMapper.selectOne(new LambdaQueryWrapper<Order>()
                .eq(Order::getId, id));
        redisTemplate.opsForValue().set(key,myOrder,120, TimeUnit.SECONDS);
        return myOrder;
    }


    @Override
    @CachePut(cacheNames = "order",key = "#order.id")
    public Order updateOrder(Order order) {
        log.info("update order data");
        orderMapper.updateById(order);
        // update Redis
        redisTemplate.opsForValue().set(CacheConstant.ORDER + order.getId(),
                order, 120, TimeUnit.SECONDS);
        return order;
    }

    @Override
    @CacheEvict(cacheNames = "order",key = "#id")
    public void deleteOrder(Long id) {
        log.info("delete order");
        orderMapper.deleteById(id);
        redisTemplate.delete(CacheConstant.ORDER + id);
    }
}

4. Two-Level Cache Implementation: v3, AOP with a Custom Annotation

Mimicking the way Spring manages caches through annotations, we can define our own annotation and handle the caching in an aspect, keeping the intrusion into business code to a minimum.

First, define an annotation to put on the methods that need to operate on the cache:

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface DoubleCache {
    String cacheName();
    String key();    // supports Spring EL expressions
    long l2TimeOut() default 120;
    CacheType type() default CacheType.FULL;
}

We use cacheName + key as the real cache key (everything lives in a single Cache; there is no isolation per cacheName). l2TimeOut is the configurable expiration time of the second-level Redis cache, and type is an enum describing the kind of cache operation, defined as follows:

public enum CacheType {
    FULL,   // read and write
    PUT,    // write only
    DELETE  // delete
}

Because key must support Spring EL expressions, we need a helper that resolves the parameters with an expression parser:

public class ElParser {
    public static String parse(String elString, TreeMap<String,Object> map){
        elString=String.format("#{%s}",elString);
        // create the expression parser
        ExpressionParser parser = new SpelExpressionParser();
        // variables can be set on the context via evaluationContext.setVariable
        EvaluationContext context = new StandardEvaluationContext();
        map.entrySet().forEach(entry->
                context.setVariable(entry.getKey(),entry.getValue())
        );

        // parse the expression
        Expression expression = parser.parseExpression(elString, new TemplateParserContext());
        // Expression.getValue() evaluates the expression against the evaluation context
        String value = expression.getValue(context, String.class);
        return value;
    }
}
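
A hypothetical usage, mirroring how the aspect below calls it (Order is the demo entity from the test module and is assumed to have Lombok-generated getters/setters):

import java.util.TreeMap;

public class ElParserDemo {
    public static void main(String[] args) {
        // the aspect puts each method argument into the map under its parameter name
        TreeMap<String, Object> params = new TreeMap<>();
        params.put("id", 42L);
        System.out.println(ElParser.parse("#id", params));        // prints 42

        Order order = new Order();
        order.setId(7L);
        params.put("order", order);
        System.out.println(ElParser.parse("#order.id", params));  // prints 7
    }
}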

The Cache-related configuration stays the same as in v1. With the groundwork done, we now define the aspect, operating on the Caffeine cache through Cache and on the Redis cache through RedisTemplate.

@Slf4j
@Component
@Aspect
@AllArgsConstructor
public class CacheAspect {
    private final Cache cache;
    private final RedisTemplate redisTemplate;
    private final String COLON = ":";

    @Pointcut("@annotation(org.example.doublecache.annotation.DoubleCache)")
    public void cacheAspect() {
    }

    @Around("cacheAspect()")
    public Object doAround(ProceedingJoinPoint point) throws Throwable {
        MethodSignature signature = (MethodSignature) point.getSignature();
        Method method = signature.getMethod();

//        if (!method.isAnnotationPresent(DoubleCache.class))
//            return null;

        // build the map used to resolve the Spring EL expression
        String[] paramNames = signature.getParameterNames();
        Object[] args = point.getArgs();
        TreeMap<String, Object> treeMap = new TreeMap<>();
        for (int i = 0; i < paramNames.length; i++) {
            treeMap.put(paramNames[i],args[i]);
        }

        DoubleCache annotation = method.getAnnotation(DoubleCache.class);
        String elResult = ElParser.parse(annotation.key(), treeMap);
        String realKey = annotation.cacheName() + COLON + elResult;

        // force update
        if (annotation.type()== CacheType.PUT){
            Object object = point.proceed();
            redisTemplate.opsForValue().set(realKey, object,annotation.l2TimeOut(), TimeUnit.SECONDS);
            cache.put(realKey, object);
            return object;
        }
        // delete
        else if (annotation.type()== CacheType.DELETE){
            redisTemplate.delete(realKey);
            cache.invalidate(realKey);
            return point.proceed();
        }

        // read/write: check Caffeine first
        Object caffeineCache = cache.getIfPresent(realKey);
        if (Objects.nonNull(caffeineCache)) {
            log.info("get data from caffeine");
            return caffeineCache;
        }

        // then check Redis
        Object redisCache = redisTemplate.opsForValue().get(realKey);
        if (Objects.nonNull(redisCache)) {
            log.info("get data from redis");
            cache.put(realKey, redisCache);
            return redisCache;
        }

        log.info("get data from database");
        Object object = point.proceed();
        if (Objects.nonNull(object)){
            // write back to Redis
            redisTemplate.opsForValue().set(realKey, object,annotation.l2TimeOut(), TimeUnit.SECONDS);
            // write into Caffeine
            cache.put(realKey, object);
        }
        return object;
    }
}

4.1 The double-cache Module

(screenshot: double-cache module in v3)

4.2 The Test Module

(screenshot: test module in v3)

OrderServiceImpl is modified as follows:

@Slf4j
@Service
@RequiredArgsConstructor
public class OrderServiceImpl extends ServiceImpl<OrderMapper, Order> implements OrderService {
    private final OrderMapper orderMapper;
    @Override
    @DoubleCache(cacheName = "order", key = "#id",
            type = CacheType.FULL)
    public Order getOrderById(Long id) {
        Order myOrder = orderMapper.selectOne(new LambdaQueryWrapper<Order>()
                .eq(Order::getId, id));
        return myOrder;
    }

    @Override
    @DoubleCache(cacheName = "order",key = "#order.id",
            type = CacheType.PUT)
    public Order updateOrder(Order order) {
        orderMapper.updateById(order);
        return order;
    }

    @Override
    @DoubleCache(cacheName = "order",key = "#id",
            type = CacheType.DELETE)
    public void deleteOrder(Long id) {
        orderMapper.deleteById(id);
    }

    @Override
    @DoubleCache(cacheName = "order",key = "#id")
    public Order getOrderByIdAndStatus(Long id,Integer status) {
        Order myOrder = orderMapper.selectOne(new LambdaQueryWrapper<Order>()
                .eq(Order::getId, id)
                .eq(Order::getStatus,status));
        return myOrder;
    }
}

Add @EnableCaching to TestApplication.

4.3 Testing

Fetching from the database takes 10 ms or more (and would take longer in production with real network hops).
Fetching from Caffeine averages about 4 ms.

(screenshot: response-time comparison)

5. Two-Level Cache, Reworked (JSR-107 Compliant + Cache Consistency)

Although v3 works, the implementation is still rough and leaves some problems unsolved. The rework below focuses on two things:

  • JSR-107 defines a specification for cache usage, and Spring provides interfaces based on it, so we can integrate Caffeine and Redis as a two-level cache directly through Spring's interfaces
  • In a distributed deployment, when one host modifies its local cache, the other hosts must be told to update theirs as well; i.e. we need to solve local-cache consistency across nodes

5.1 The JSR-107 Specification

Reference: the JSR-107 specification (unofficial Chinese translation).
The JSR-107 caching specification defines five core interfaces: CachingProvider, CacheManager, Cache, Entry and Expiry. As the figure below shows, everything except Entry and Expiry forms a one-to-many containment relationship from top to bottom.
(figure: JSR-107 component hierarchy)

From the figure we can see that an application may create and manage multiple CachingProviders, a CachingProvider can likewise manage multiple CacheManagers, and a CacheManager in turn maintains multiple Caches.

A Cache is a Map-like data structure, an Entry is one of the key-value pairs stored in it, and every Entry has an expiration time, its Expiry. When integrating a third-party cache with Spring, we only need to implement the Cache and CacheManager interfaces. Let's look at each in turn.

Interface overview:

  • CachingProvider (cache provider): defines how to create, configure, obtain, manage and control multiple CacheManagers. An application may access several CachingProviders at runtime.
  • CacheManager (cache manager): defines how to create, configure, obtain, manage and control multiple uniquely named Caches, which live within the CacheManager's context. A CacheManager is owned by exactly one CachingProvider.
  • Cache (cache component): a Map-like data structure that temporarily stores values indexed by key. A Cache is owned by exactly one CacheManager.
  • Entry: a key-value pair stored in a Cache.
  • Expiry: every entry stored in a Cache has a defined lifetime; once it elapses, the entry is expired and can no longer be accessed, updated or deleted. Expiration is configured through an ExpiryPolicy.
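
To make this hierarchy concrete, here is a small JCache example that is not part of the article's code; it assumes the javax.cache cache-api and some JSR-107 provider (for example Caffeine's jcache module) are on the classpath. The rework below does not use this API directly; it implements Spring's Cache and CacheManager instead.

import javax.cache.Cache;
import javax.cache.CacheManager;
import javax.cache.Caching;
import javax.cache.configuration.MutableConfiguration;
import javax.cache.expiry.CreatedExpiryPolicy;
import javax.cache.expiry.Duration;
import javax.cache.spi.CachingProvider;

public class Jsr107Demo {
    public static void main(String[] args) {
        // CachingProvider -> CacheManager -> Cache: the one-to-many hierarchy described above
        CachingProvider provider = Caching.getCachingProvider();
        CacheManager manager = provider.getCacheManager();

        // every Entry created in this cache gets a one-minute Expiry
        MutableConfiguration<String, String> config = new MutableConfiguration<String, String>()
                .setTypes(String.class, String.class)
                .setExpiryPolicyFactory(CreatedExpiryPolicy.factoryOf(Duration.ONE_MINUTE));

        Cache<String, String> cache = manager.createCache("order", config);
        cache.put("order:1", "some cached value");
        System.out.println(cache.get("order:1"));
    }
}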

5.2 Reworking According to JSR-107

5.2.1 Cache

Spring's Cache interface standardizes what a cache component looks like; it covers all the cache operations and manages how a concrete cache is used. Familiar implementations such as RedisCache and EhCacheCache all implement this interface.
The Cache interface defines methods such as get, put, evict and clear, corresponding to reading, writing, removing and clearing cache entries. We will not implement Cache directly, though: AbstractValueAdaptingCache is an abstract class that already implements Cache, a convenience layer Spring adds on top of the interface, so we simply extend that class.

(figure: AbstractValueAdaptingCache class hierarchy)

After extending AbstractValueAdaptingCache, besides the constructor that creates the Cache, we need to implement the following methods:

// Performs the actual lookup in the cache; the parent class's get() method delegates to it
protected abstract Object lookup(Object key);

// Gets the cached value by key; if nothing is found, valueLoader's call() method is invoked
public <T> T get(Object key, Callable<T> valueLoader);

// Puts data into the cache
public void put(Object key, Object value);

// Evicts a cache entry
public void evict(Object key);

// Clears all data in the cache
public void clear();

// Returns the cache name, usually assigned when the CacheManager creates the cache
String getName();

// Returns the underlying native cache
Object getNativeCache();

Because we are integrating RedisTemplate and Caffeine's Cache, both must be passed into the cache's constructor, along with the cache name cacheName and the cache parameters actually set in the configuration file. The constructor looks like this:

public class DoubleCache extends AbstractValueAdaptingCache {
    private String cacheName;
    private RedisTemplate<Object, Object> redisTemplate;
    private Cache<Object, Object> caffeineCache;
    private DoubleCacheConfig doubleCacheConfig;

    protected DoubleCache(boolean allowNullValues) {
        super(allowNullValues);
    }

    public DoubleCache(String cacheName,RedisTemplate<Object, Object> redisTemplate,
                       Cache<Object, Object> caffeineCache,
                       DoubleCacheConfig doubleCacheConfig){
        super(doubleCacheConfig.getAllowNull());
        this.cacheName=cacheName;
        this.redisTemplate=redisTemplate;
        this.caffeineCache=caffeineCache;
        this.doubleCacheConfig=doubleCacheConfig;
    }
    //...
}

The abstract parent's constructor takes a single boolean parameter, allowNullValues, which says whether null values may be cached. AbstractValueAdaptingCache also defines two companion wrapper methods, toStoreValue() and fromStoreValue(), which wrap a null value when it is stored and unwrap it again when it is read back.

Later, the CacheManager will call the second constructor we wrote here to instantiate the Cache. The DoubleCacheConfig parameter is a data object bound from the yml configuration via @ConfigurationProperties; we will use it shortly.

When a method is annotated with @Cacheable, execution first goes through get(key) in the parent AbstractValueAdaptingCache, which then calls our own lookup method. Inside lookup, which performs the actual read, the logic is still: check Caffeine first, and only query Redis on a miss:

@Override
protected Object lookup(Object key) {
    // check Caffeine first
    Object obj = caffeineCache.getIfPresent(key);
    if (Objects.nonNull(obj)){
        log.info("get data from caffeine");
        return obj;
    }

    // then check Redis
    String redisKey = this.cacheName + ":" + key;
    obj = redisTemplate.opsForValue().get(redisKey);
    if (Objects.nonNull(obj)){
        log.info("get data from redis");
        caffeineCache.put(key,obj);
    }
    return obj;
}

If lookup returns a non-null result, it is handed straight back to the caller. If it returns null, the original method is executed and, once it completes, put is called to store the result. Next we implement put:

@Override
public void put(Object key, Object value) {
    if(!isAllowNullValues() && Objects.isNull(value)){
        log.error("the value NULL will not be cached");
        return;
    }

    // wrap with toStoreValue(value) so that Caffeine, which cannot store null directly, can cache it
    caffeineCache.put(key,toStoreValue(value));

    // a null marker only needs to live in Caffeine; there is no need to store it in Redis as well
    if (Objects.isNull(value))
        return;

    String redisKey=this.cacheName +":"+ key;
    Optional<Long> expireOpt = Optional.ofNullable(doubleCacheConfig)
            .map(DoubleCacheConfig::getRedisExpire);
    if (expireOpt.isPresent()){
        redisTemplate.opsForValue().set(redisKey,toStoreValue(value),
                expireOpt.get(), TimeUnit.SECONDS);
    }else{
        redisTemplate.opsForValue().set(redisKey,toStoreValue(value));
    }
}

Here we check whether caching null values is allowed; one benefit of caching nulls is protection against cache penetration. Note that Caffeine cannot store null directly, so the parent's toStoreValue() method is used to wrap it as a NullValue. When reading back, if the value is a NullValue we do not have to call fromStoreValue() ourselves to unwrap it; the parent's get method already does that for us.

Also, when put caches a null value, keeping it only in Caffeine is enough; it is not written to Redis.

The eviction method evict() and the clearing method clear() are straightforward: they delete a single entry or all of the data, respectively:

@Override
public void evict(Object key) {
    redisTemplate.delete(this.cacheName +":"+ key);
    caffeineCache.invalidate(key);
}

@Override
public void clear() {
    Set<Object> keys = redisTemplate.keys(this.cacheName.concat(":*"));
    for (Object key : keys) {
        redisTemplate.delete(String.valueOf(key));
    }
    caffeineCache.invalidateAll();
}

Returning the cache name and the underlying native cache:

@Override
public String getName() {
    return this.cacheName;
}
@Override
public Object getNativeCache() {
    return this;
}

Finally, the two-argument get method. It comes last because, if we only drive the cache through annotations, this method is never called. A quick look at the implementation:

@Override
public <T> T get(Object key, Callable<T> valueLoader) {
    // 'lock' is an instance-level ReentrantLock field (see the full class below); a lock created
    // per call would not actually serialize concurrent callers
    try {
        lock.lock();
        Object obj = lookup(key);
        if (Objects.nonNull(obj)) {
            return (T) obj;
        }
        // cache miss: load the value and populate both cache levels
        obj = valueLoader.call();
        put(key, obj);
        return (T)obj;
    }catch (Exception e){
        log.error(e.getMessage());
    }finally {
        lock.unlock();
    }
    return null;
}

The idea is easy to follow: lookup is called first to see whether the object is already cached; if not, the Callable's call method loads it and the result is stored in the cache. How this method is actually used is shown later, in the usage section.

Note that the interface documentation for this method stresses that the implementation must guarantee its own synchronization, hence the ReentrantLock here. With that, the Cache implementation is done.
The full class (this initial version, before messaging was added) is as follows:

/**
 * The initial version, before cache-consistency messaging was added
 **/
import com.github.benmanes.caffeine.cache.Cache;
import lombok.extern.slf4j.Slf4j;
import org.example.doublecache.config.DoubleCacheConfig;
import org.springframework.cache.support.AbstractValueAdaptingCache;
import org.springframework.data.redis.core.RedisTemplate;

import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

@Slf4j
public class DoubleCacheV0 extends AbstractValueAdaptingCache {
    private String cacheName;
    private RedisTemplate<Object, Object> redisTemplate;
    private Cache<Object, Object> caffeineCache;
    private DoubleCacheConfig doubleCacheConfig;
    // shared lock so that concurrent get(key, valueLoader) callers are actually serialized
    private final ReentrantLock lock = new ReentrantLock();

    protected DoubleCacheV0(boolean allowNullValues) {
        super(allowNullValues);
    }

    public DoubleCacheV0(String cacheName, RedisTemplate<Object, Object> redisTemplate,
                         Cache<Object, Object> caffeineCache,
                         DoubleCacheConfig doubleCacheConfig){
        super(doubleCacheConfig.getAllowNull());
        this.cacheName =cacheName;
        this.redisTemplate=redisTemplate;
        this.caffeineCache=caffeineCache;
        this.doubleCacheConfig=doubleCacheConfig;
    }

    // not invoked when caching is driven by annotations; the parent class's get method is used instead
    @Override
    public <T> T get(Object key, Callable<T> valueLoader) {
        try {
            lock.lock(); // serialize loading so that concurrent misses do not all hit the DB

            Object obj = lookup(key);
            if (Objects.nonNull(obj)){
                return (T)obj;
            }
            // cache miss: load the value
            obj = valueLoader.call();
            // populate both cache levels
            put(key,obj);
            return (T)obj;
        }catch (Exception e){
            log.error(e.getMessage());
        }finally {
            lock.unlock();
        }
        return null;
    }

    @Override
    protected Object lookup(Object key) {
        // check Caffeine first
        Object obj = caffeineCache.getIfPresent(key);
        if (Objects.nonNull(obj)){
            log.info("get data from caffeine");
            return obj; // do not unwrap with fromStoreValue here, otherwise a cached NullValue becomes null and the DB is queried again
        }

        // then check Redis
        String redisKey=this.cacheName +":"+ key;
        obj = redisTemplate.opsForValue().get(redisKey);
        if (Objects.nonNull(obj)){
            log.info("get data from redis");
            caffeineCache.put(key,obj);
        }
        return obj;
    }

    @Override
    public void put(Object key, Object value) {
        if(!isAllowNullValues() && Objects.isNull(value)){
            log.error("the value NULL will not be cached");
            return;
        }

        // wrap with toStoreValue(value) so that Caffeine, which cannot store null directly, can cache it
        //caffeineCache.put(key,value);
        caffeineCache.put(key,toStoreValue(value));

        // a null marker only needs to live in Caffeine; there is no need to store it in Redis as well
        if (Objects.isNull(value))
            return;

        String redisKey=this.cacheName +":"+ key;
        Optional<Long> expireOpt = Optional.ofNullable(doubleCacheConfig)
                .map(DoubleCacheConfig::getRedisExpire);
        if (expireOpt.isPresent()){
            redisTemplate.opsForValue().set(redisKey,toStoreValue(value),
                    expireOpt.get(), TimeUnit.SECONDS);
        }else{
            redisTemplate.opsForValue().set(redisKey,toStoreValue(value));
        }
    }

    @Override
    public void evict(Object key) {
        redisTemplate.delete(this.cacheName +":"+ key);
        caffeineCache.invalidate(key);
    }

    @Override
    public void clear() {
        // in production, avoid the KEYS command
        Set<Object> keys = redisTemplate.keys(this.cacheName.concat(":*"));
        for (Object key : keys) {
            redisTemplate.delete(String.valueOf(key));
        }
        caffeineCache.invalidateAll();
    }

    @Override
    public String getName() {
        return this.cacheName;
    }

    @Override
    public Object getNativeCache() {
        return this;
    }

}
5.2.2 CacheManager

Now for the other important interface, CacheManager.
As the name suggests, a CacheManager manages a group of Caches. The CaffeineCacheManager we used in the v2 version earlier implements this interface, as do RedisCacheManager, EhCacheCacheManager and others.

We now define a class implementing CacheManager that manages the DoubleCache implemented above so it can be used as a Spring cache. Only two methods need to be implemented:

// Returns the Cache instance for the given cacheName, creating it if it does not exist yet
Cache getCache(String name);

// Returns the names of all managed caches
Collection<String> getCacheNames();

In the custom cache manager we keep the different Caches in a ConcurrentHashMap, and define a constructor that receives the RedisTemplate already configured in Spring along with the related cache configuration parameters:

public class DoubleCacheManager implements CacheManager {
    Map<String, Cache> cacheMap = new ConcurrentHashMap<>();
    private RedisTemplate<Object, Object> redisTemplate;
    private DoubleCacheConfig dcConfig;

    public DoubleCacheManager(RedisTemplate<Object, Object> redisTemplate,
                              DoubleCacheConfig doubleCacheConfig) {
        this.redisTemplate = redisTemplate;
        this.dcConfig = doubleCacheConfig;
    }
    //...
}

Then implement getCache. The logic is simple: look the Cache up in the Map by name and return it if present. This name parameter is the cacheName mentioned earlier; the CacheManager uses it to keep different Caches isolated.

If no cache with that name exists, a new DoubleCache is created and put into the Map. ConcurrentHashMap's putIfAbsent() is used so the Cache is not created twice (which could also lose data already stored in it). The code:

@Override
public Cache getCache(String name) {
    Cache cache = cacheMap.get(name);
    if (Objects.nonNull(cache)) {
        return cache;
    }

    cache = new DoubleCache(name, redisTemplate, createCaffeineCache(), dcConfig);
    Cache oldCache = cacheMap.putIfAbsent(name, cache);
    return oldCache == null ? cache : oldCache;
}

Creating the DoubleCache requires a Caffeine Cache object as a constructor argument, which is built from the concrete parameters in the project's configuration file:

private com.github.benmanes.caffeine.cache.Cache createCaffeineCache(){
    Caffeine<Object, Object> caffeineBuilder = Caffeine.newBuilder();
    Optional<DoubleCacheConfig> dcConfigOpt = Optional.ofNullable(this.dcConfig);
    dcConfigOpt.map(DoubleCacheConfig::getInit)
            .ifPresent(init->caffeineBuilder.initialCapacity(init));
    dcConfigOpt.map(DoubleCacheConfig::getMax)
            .ifPresent(max->caffeineBuilder.maximumSize(max));
    dcConfigOpt.map(DoubleCacheConfig::getExpireAfterWrite)
            .ifPresent(eaw->caffeineBuilder.expireAfterWrite(eaw,TimeUnit.SECONDS));
    dcConfigOpt.map(DoubleCacheConfig::getExpireAfterAccess)
            .ifPresent(eaa->caffeineBuilder.expireAfterAccess(eaa,TimeUnit.SECONDS));
    dcConfigOpt.map(DoubleCacheConfig::getRefreshAfterWrite)
            .ifPresent(raw->caffeineBuilder.refreshAfterWrite(raw,TimeUnit.SECONDS));
    return caffeineBuilder.build();
}

getCacheNames is trivial: just return the Map's keySet:

@Override
public Collection<String> getCacheNames() {
    return cacheMap.keySet();
}

The complete DoubleCacheManager:

import org.example.doublecache.config.DoubleCacheConfig;
import com.github.benmanes.caffeine.cache.Caffeine;
import org.springframework.cache.Cache;
import org.springframework.cache.CacheManager;
import org.springframework.data.redis.core.RedisTemplate;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

public class DoubleCacheManager implements CacheManager {
    Map<String, Cache> cacheMap = new ConcurrentHashMap<>();
    private RedisTemplate<Object, Object> redisTemplate;
    private DoubleCacheConfig dcConfig;

    public DoubleCacheManager(RedisTemplate<Object, Object> redisTemplate,
                              DoubleCacheConfig doubleCacheConfig) {
        this.redisTemplate = redisTemplate;
        this.dcConfig = doubleCacheConfig;
    }

    @Override
    public Cache getCache(String name) {
        Cache cache = cacheMap.get(name);
        if (Objects.nonNull(cache)) {
            return cache;
        }

        cache = new DoubleCacheV0(name, redisTemplate, createCaffeineCache(), dcConfig);
        Cache oldCache = cacheMap.putIfAbsent(name, cache);
        return oldCache == null ? cache : oldCache;
    }

    @Override
    public Collection<String> getCacheNames() {
        return cacheMap.keySet();
    }

    private com.github.benmanes.caffeine.cache.Cache createCaffeineCache(){
        Caffeine<Object, Object> caffeineBuilder = Caffeine.newBuilder();
        Optional<DoubleCacheConfig> dcConfigOpt = Optional.ofNullable(this.dcConfig);
        dcConfigOpt.map(DoubleCacheConfig::getInit)
                .ifPresent(init->caffeineBuilder.initialCapacity(init));
        dcConfigOpt.map(DoubleCacheConfig::getMax)
                .ifPresent(max->caffeineBuilder.maximumSize(max));
        dcConfigOpt.map(DoubleCacheConfig::getExpireAfterWrite)
                .ifPresent(eaw->caffeineBuilder.expireAfterWrite(eaw,TimeUnit.SECONDS));
        dcConfigOpt.map(DoubleCacheConfig::getExpireAfterAccess)
                .ifPresent(eaa->caffeineBuilder.expireAfterAccess(eaa,TimeUnit.SECONDS));
        dcConfigOpt.map(DoubleCacheConfig::getRefreshAfterWrite)
                .ifPresent(raw->caffeineBuilder.refreshAfterWrite(raw,TimeUnit.SECONDS));
        return caffeineBuilder.build();
    }
}
5.2.3 Configuration & Usage
@Data
@ConfigurationProperties(prefix = "doublecache")
public class DoubleCacheConfig {
    private Boolean allowNull = true;
    private Integer init = 100;
    private Integer max = 1000;
    private Long expireAfterWrite ;
    private Long expireAfterAccess;
    private Long refreshAfterWrite;
    private Long redisExpire;
}

Configure the cache parameters in application.yml; they are bound into the DoubleCacheConfig class via @ConfigurationProperties:

doublecache:
  allowNull: true
  init: 128
  max: 1024
  expireAfterWrite: 30  # Caffeine expiration (seconds)
  redisExpire: 60       # Redis expiration (seconds)

Register the custom DoubleCacheManager as the default cache manager:

@Configuration
@EnableConfigurationProperties({DoubleCacheConfig.class})
public class CacheConfig {
    @Autowired
    DoubleCacheConfig doubleCacheConfig;

    @Bean
    public DoubleCacheManager cacheManager(RedisTemplate<Object,Object> redisTemplate,
                                           DoubleCacheConfig doubleCacheConfig){
        return new DoubleCacheManager(redisTemplate,doubleCacheConfig);
    }
}

The Service code stays as before: there is no manual cache handling, just the @Cacheable family of annotations on the methods:

@Service @Slf4j
@AllArgsConstructor
public class OrderServiceImpl implements OrderService {
    private final OrderMapper orderMapper;

    @Cacheable(value = "order",key = "#id")
    public Order getOrderById(Long id) {
        Order myOrder = orderMapper.selectOne(new LambdaQueryWrapper<Order>()
                .eq(Order::getId, id));
        return myOrder;
    }

    @CachePut(cacheNames = "order",key = "#order.id")
    public Order updateOrder(Order order) {
        orderMapper.updateById(order);
        return order;
    }

    @CacheEvict(cacheNames = "order",key = "#id")
    public void deleteOrder(Long id) {
        orderMapper.deleteById(id);
    }
    
    // no annotation here: use the get(key, Callable) method directly
    public Order getOrderById2(Long id) {
        DoubleCacheManager cacheManager = SpringContextUtil.getBean(DoubleCacheManager.class);
        Cache cache = cacheManager.getCache("order");
        Order order =(Order) cache.get(id, (Callable<Object>) () -> {
            log.info("get data from database");
            Order myOrder = orderMapper.selectOne(new LambdaQueryWrapper<Order>()
                    .eq(Order::getId, id));
            return myOrder;
        });
        return order;
    }    
}
5.2.4 Testing

P.S.: as long as the spring-boot-starter-data-redis dependency is added, the Redis connection is configured in application.yml, and @EnableCaching is enabled, Spring will automatically cache through Redis when you use annotations such as @Cacheable (the POJO classes must implement Serializable), and the custom RedisTemplate is not involved: even if one is configured, it has no effect on that auto-configured cache.
I discovered this through a failing test in my own project: the cause was that CacheConfig had not been added to the spring.factories file, so the JSON serializer configured on the RedisTemplate never took effect.

Directory

(screenshots: project directory and test output)

If you see errors like "java: 方法引用无效 找不到符号" (invalid method reference / cannot find symbol), the Lombok dependency version may be too old and incompatible with your IDEA version.

5.3 Adapting to a Distributed Environment: Keeping Cache Data Consistent

In a distributed deployment, the first-level caches on different hosts can get out of sync. When one host modifies its local cache, the others are unaware and keep serving the old entries, so they may return stale data. Since the project already uses Redis, we can use its publish/subscribe feature to synchronize the caches across nodes.

5.3.1 Defining the Message Body

Before sending messages through Redis we define a message object. It carries the name of the Cache the message applies to, the operation type, the data, and an identifier of the host that sent it:

@Data
@NoArgsConstructor
@AllArgsConstructor
public class CacheMessage implements Serializable {
    private static final long serialVersionUID = -3574997636829868400L;

    private String cacheName;
    private CacheMsgType type;  // indicates an update or a delete
    private Object key;			
    private Object value;
    private String msgSource;   // identifies the source host, used to avoid re-applying our own messages
}

Define an enum to indicate whether the message is an update or a delete:

public enum CacheMsgType {
    UPDATE,
    DELETE;
}

The msgSource field identifies the host that sent the message. It exists so that when a host receives a message it sent itself, it does not repeat the operation: it can simply discard the message and do nothing. The source identifier used here is the host IP plus the application port, obtained like this:

public static String getMsgSource() throws UnknownHostException {
    String host = InetAddress.getLocalHost().getHostAddress();
    Environment env = SpringContextUtil.getBean(Environment.class);
    String port = env.getProperty("server.port");
    return host+":"+port;
}

That completes the message body; afterwards, publishing this object to a topic is just a call to redisTemplate's convertAndSend method.

5.3.2 Redis Message Configuration

To use Redis message listening, two things need to be configured:

  • MessageListenerAdapter: a message listener adapter in which we can specify a custom listener delegate and which of its methods handles the message
  • RedisMessageListenerContainer: a container that provides asynchronous behaviour for message listeners, plus low-level concerns such as message conversion and dispatching

@Configuration
public class MessageConfig {
    public static final String TOPIC="cache.msg";
    
    @Bean
    RedisMessageListenerContainer container(MessageListenerAdapter listenerAdapter,
                                            RedisConnectionFactory redisConnectionFactory){
        RedisMessageListenerContainer container = new RedisMessageListenerContainer();
        container.setConnectionFactory(redisConnectionFactory);
        container.addMessageListener(listenerAdapter, new PatternTopic(TOPIC));
        return container;
    }
    
    @Bean
    MessageListenerAdapter adapter(RedisMessageReceiver receiver){
        return new MessageListenerAdapter(receiver,"receive");
    }   
}

In the MessageListenerAdapter above we pass in a custom RedisMessageReceiver to receive and process messages, and specify that its receive method handles what the listener picks up. Let's look at how it consumes the messages.

5.3.3 Message Consumption Logic

Define a RedisMessageReceiver class that receives and consumes messages. Its handler method needs to:

  • deserialize the received message into the CacheMessage type defined above
  • use the message's host identifier to check whether the message was sent by this host; if so, drop it, and only process messages from other hosts
  • use cacheName to obtain the specific DoubleCache instance
  • use the message type to decide whether to perform an update or a delete, and call the corresponding method

@Slf4j @Component
@AllArgsConstructor
public class RedisMessageReceiver {
    private final RedisTemplate redisTemplate;
    private final DoubleCacheManager manager;

    // receive the notification and process it
    public void receive(String message) throws UnknownHostException {
        CacheMessage msg = (CacheMessage) redisTemplate
                .getValueSerializer().deserialize(message.getBytes());
        log.info(msg.toString());

        // ignore messages sent by this host
        if (msg.getMsgSource().equals(MessageSourceUtil.getMsgSource())){
            log.info("message sent by this host, ignoring");
            return;
        }

        DoubleCache cache = (DoubleCache) manager.getCache(msg.getCacheName());
        if (msg.getType()== CacheMsgType.UPDATE) {
            cache.updateL1Cache(msg.getKey(),msg.getValue());
            log.info("update local cache");
        }

        if (msg.getType()== CacheMsgType.DELETE) {
            log.info("delete local cache");
            cache.evictL1Cache(msg.getKey());
        }
    }
}

The code above calls two new methods on DoubleCache, updateL1Cache and evictL1Cache, which update and evict the first-level cache; we add them to DoubleCache next.

5.3.4 Modifying DoubleCache

DoubleCache中先添加上面提到的两个方法,由CacheManager获取到具体缓存后调用,进行一级缓存的更新或删除操作:

// update the first-level cache
public void updateL1Cache(Object key,Object value){
    caffeineCache.put(key,value);
}

// evict the first-level cache
public void evictL1Cache(Object key){
    caffeineCache.invalidate(key);
}

With everything modified, where should the messages be sent? In DoubleCache's put method, which stores entries, and its evict method, which removes them. First modify put: the earlier logic is unchanged, and at the end we send a message telling the other nodes to update their first-level cache:

public void put(Object key, Object value) {
    // ... unchanged code from above omitted ...

    // notify the other nodes to update their first-level cache
    try {
        CacheMessage cacheMessage
                = new CacheMessage(this.cacheName, CacheMsgType.UPDATE,
                key, value, MessageSourceUtil.getMsgSource());
        redisTemplate.convertAndSend(MessageConfig.TOPIC, cacheMessage);
    } catch (UnknownHostException e) {
        log.error(e.getMessage());
    }
}

Then modify evict the same way: keep the earlier logic and append the message-sending code:

public void evict(Object key) {
    // ... unchanged code from above omitted ...

    // notify the other nodes to evict their first-level cache
    try {
        CacheMessage cacheMessage
                = new CacheMessage(this.cacheName, CacheMsgType.DELETE,
                key, null, MessageSourceUtil.getMsgSource());
        redisTemplate.convertAndSend(MessageConfig.TOPIC, cacheMessage);
    } catch (UnknownHostException e) {
        log.error(e.getMessage());
    }
}

That completes the adaptation for the distributed environment; the final version follows.

6. Final Code: v4.release

6.1 The double-cache Module

Directory

(screenshot: final double-cache module directory)

6.2 Code

DoubleCache

import com.github.benmanes.caffeine.cache.Cache;
import lombok.extern.slf4j.Slf4j;
import org.example.doublecache.config.DoubleCacheConfig;
import org.example.doublecache.config.MessageConfig;
import org.example.doublecache.msg.CacheMessage;
import org.example.doublecache.msg.CacheMsgType;
import org.example.doublecache.msg.MessageSourceUtil;
import org.springframework.cache.support.AbstractValueAdaptingCache;
import org.springframework.data.redis.core.RedisTemplate;

import java.net.UnknownHostException;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

@Slf4j
public class DoubleCache extends AbstractValueAdaptingCache {
    private String cacheName;
    private RedisTemplate<Object, Object> redisTemplate;
    private Cache<Object, Object> caffeineCache;
    private DoubleCacheConfig doubleCacheConfig;
    // shared lock so that concurrent get(key, valueLoader) callers are actually serialized
    private final ReentrantLock lock = new ReentrantLock();

    protected DoubleCache(boolean allowNullValues) {
        super(allowNullValues);
    }

    public DoubleCache(String cacheName, RedisTemplate<Object, Object> redisTemplate,
                       Cache<Object, Object> caffeineCache,
                       DoubleCacheConfig doubleCacheConfig) {
        super(doubleCacheConfig.getAllowNull());
        this.cacheName = cacheName;
        this.redisTemplate = redisTemplate;
        this.caffeineCache = caffeineCache;
        this.doubleCacheConfig = doubleCacheConfig;
    }

    // not invoked when caching is driven by annotations; the parent class's get method is used instead
    @Override
    public <T> T get(Object key, Callable<T> valueLoader) {
        try {
            lock.lock(); // serialize loading so that concurrent misses do not all hit the DB

            Object obj = lookup(key);
            if (Objects.nonNull(obj)) {
                return (T) obj;
            }
            // cache miss: load the value
            obj = valueLoader.call();
            // populate both cache levels
            put(key, obj);
            return (T) obj;
        } catch (Exception e) {
            log.error(e.getMessage());
        } finally {
            lock.unlock();
        }
        return null;
    }

    @Override
    protected Object lookup(Object key) {
        // check Caffeine first
        Object obj = caffeineCache.getIfPresent(key);
        if (Objects.nonNull(obj)) {
            log.info("get data from caffeine");
            return obj; // do not unwrap with fromStoreValue here, otherwise a cached NullValue becomes null and the DB is queried again
        }

        // then check Redis
        String redisKey = this.cacheName + ":" + key;
        obj = redisTemplate.opsForValue().get(redisKey);
        if (Objects.nonNull(obj)) {
            log.info("get data from redis");
            caffeineCache.put(key, obj);
        }
        return obj;
    }

    @Override
    public void put(Object key, Object value) {
        if (!isAllowNullValues() && Objects.isNull(value)) {
            log.error("the value NULL will not be cached");
            return;
        }

        // wrap with toStoreValue(value) so that Caffeine, which cannot store null directly, can cache it
        //caffeineCache.put(key,value);
        caffeineCache.put(key, toStoreValue(value));

        // a null marker only needs to live in Caffeine; there is no need to store it in Redis as well
        if (Objects.isNull(value))
            return;

        String redisKey = this.cacheName + ":" + key;
        Optional<Long> expireOpt = Optional.ofNullable(doubleCacheConfig)
                .map(DoubleCacheConfig::getRedisExpire);
        if (expireOpt.isPresent()) {
            redisTemplate.opsForValue().set(redisKey, toStoreValue(value),
                    expireOpt.get(), TimeUnit.SECONDS);
        } else {
            redisTemplate.opsForValue().set(redisKey, toStoreValue(value));
        }

        // notify the other nodes to update their first-level cache
        // (likewise, null markers are never broadcast to other nodes)
        try {
            CacheMessage cacheMessage
                    = new CacheMessage(this.cacheName, CacheMsgType.UPDATE,
                    key, value, MessageSourceUtil.getMsgSource());
            redisTemplate.convertAndSend(MessageConfig.TOPIC, cacheMessage);
        } catch (UnknownHostException e) {
            log.error(e.getMessage());
        }
    }

    @Override
    public void evict(Object key) {
        redisTemplate.delete(this.cacheName + ":" + key);
        caffeineCache.invalidate(key);

        // notify the other nodes to evict their first-level cache
        try {
            CacheMessage cacheMessage
                    = new CacheMessage(this.cacheName, CacheMsgType.DELETE,
                    key, null, MessageSourceUtil.getMsgSource());
            redisTemplate.convertAndSend(MessageConfig.TOPIC, cacheMessage);
        } catch (UnknownHostException e) {
            log.error(e.getMessage());
        }
    }

    @Override
    public void clear() {
        // in production, avoid the KEYS command
        Set<Object> keys = redisTemplate.keys(this.cacheName.concat(":*"));
        for (Object key : keys) {
            redisTemplate.delete(String.valueOf(key));
        }
        caffeineCache.invalidateAll();
    }

    @Override
    public String getName() {
        return this.cacheName;
    }

    @Override
    public Object getNativeCache() {
        return this;
    }

    // update the first-level cache
    public void updateL1Cache(Object key, Object value) {
        caffeineCache.put(key, value);
    }

    // evict the first-level cache
    public void evictL1Cache(Object key) {
        caffeineCache.invalidate(key);
    }
}

DoubleCacheManager

import org.example.doublecache.config.DoubleCacheConfig;
import com.github.benmanes.caffeine.cache.Caffeine;
import org.springframework.cache.Cache;
import org.springframework.cache.CacheManager;
import org.springframework.data.redis.core.RedisTemplate;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

public class DoubleCacheManager implements CacheManager {
    Map<String, Cache> cacheMap = new ConcurrentHashMap<>();
    private RedisTemplate<Object, Object> redisTemplate;
    private DoubleCacheConfig dcConfig;

    public DoubleCacheManager(RedisTemplate<Object, Object> redisTemplate,
                              DoubleCacheConfig doubleCacheConfig) {
        this.redisTemplate = redisTemplate;
        this.dcConfig = doubleCacheConfig;
    }

    @Override
    public Cache getCache(String name) {
        Cache cache = cacheMap.get(name);
        if (Objects.nonNull(cache)) {
            return cache;
        }

        cache = new DoubleCache(name, redisTemplate, createCaffeineCache(), dcConfig);
        Cache oldCache = cacheMap.putIfAbsent(name, cache);
        return oldCache == null ? cache : oldCache;
    }

    @Override
    public Collection<String> getCacheNames() {
        return cacheMap.keySet();
    }

    private com.github.benmanes.caffeine.cache.Cache createCaffeineCache(){
        Caffeine<Object, Object> caffeineBuilder = Caffeine.newBuilder();
        Optional<DoubleCacheConfig> dcConfigOpt = Optional.ofNullable(this.dcConfig);
        dcConfigOpt.map(DoubleCacheConfig::getInit)
                .ifPresent(init->caffeineBuilder.initialCapacity(init));
        dcConfigOpt.map(DoubleCacheConfig::getMax)
                .ifPresent(max->caffeineBuilder.maximumSize(max));
        dcConfigOpt.map(DoubleCacheConfig::getExpireAfterWrite)
                .ifPresent(eaw->caffeineBuilder.expireAfterWrite(eaw,TimeUnit.SECONDS));
        dcConfigOpt.map(DoubleCacheConfig::getExpireAfterAccess)
                .ifPresent(eaa->caffeineBuilder.expireAfterAccess(eaa,TimeUnit.SECONDS));
        dcConfigOpt.map(DoubleCacheConfig::getRefreshAfterWrite)
                .ifPresent(raw->caffeineBuilder.refreshAfterWrite(raw,TimeUnit.SECONDS));
        return caffeineBuilder.build();
    }
}

CacheConfig (the @EnableCaching annotation has been moved here)

import org.example.doublecache.cache.DoubleCacheManager;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.core.RedisTemplate;

@Configuration
@EnableCaching
@EnableConfigurationProperties({DoubleCacheConfig.class})
public class CacheConfig {
    @Autowired
    DoubleCacheConfig doubleCacheConfig;

    @Bean
    public DoubleCacheManager cacheManager(RedisTemplate<Object,Object> redisTemplate,
                                           DoubleCacheConfig doubleCacheConfig){
        return new DoubleCacheManager(redisTemplate,doubleCacheConfig);
    }
}

DoubleCacheConfig

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

@Data
@ConfigurationProperties(prefix = "doublecache")
public class DoubleCacheConfig {
    private Boolean allowNull = true;
    private Integer init = 100;
    private Integer max = 1000;
    private Long expireAfterWrite ;
    private Long expireAfterAccess;
    private Long refreshAfterWrite;
    private Long redisExpire;
}

MessageConfig

import org.example.doublecache.msg.RedisMessageReceiver;
import org.example.doublecache.util.SpringContextUtil;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.listener.PatternTopic;
import org.springframework.data.redis.listener.RedisMessageListenerContainer;
import org.springframework.data.redis.listener.adapter.MessageListenerAdapter;

@Configuration
@Import({RedisMessageReceiver.class, SpringContextUtil.class})
public class MessageConfig {
    public static final String TOPIC="cache.msg";

    @Bean
    RedisMessageListenerContainer container(MessageListenerAdapter listenerAdapter,
                                            RedisConnectionFactory redisConnectionFactory){
        RedisMessageListenerContainer container = new RedisMessageListenerContainer();
        container.setConnectionFactory(redisConnectionFactory);
        container.addMessageListener(listenerAdapter, new PatternTopic(TOPIC));
        return container;
    }

    @Bean
    MessageListenerAdapter adapter(RedisMessageReceiver receiver){
        return new MessageListenerAdapter(receiver,"receive");
    }
}

RedisConfig

import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.springframework.cache.annotation.CachingConfigurerSupport;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.Jackson2JsonRedisSerializer;
import org.springframework.data.redis.serializer.RedisSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;

@Configuration
public class RedisConfig extends CachingConfigurerSupport {
    @Bean
    public RedisTemplate redisTemplate(RedisConnectionFactory redisConnectionFactory){
        RedisTemplate<Object, Object> redisTemplate=new RedisTemplate<>();
        redisTemplate.setConnectionFactory(redisConnectionFactory);

        // JSON serialization settings
        Jackson2JsonRedisSerializer<Object> jackson2JsonRedisSerializer
                = new Jackson2JsonRedisSerializer<>(Object.class);
        ObjectMapper om = new ObjectMapper();
        om.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.ANY);

        om.activateDefaultTyping(om.getPolymorphicTypeValidator(),
                ObjectMapper.DefaultTyping.NON_FINAL, // write class names into the JSON
                JsonTypeInfo.As.WRAPPER_ARRAY);
        jackson2JsonRedisSerializer.setObjectMapper(om);

        // String serialization
        RedisSerializer<?> stringSerializer = new StringRedisSerializer();

        redisTemplate.setKeySerializer(stringSerializer);               // keys as plain strings
        redisTemplate.setValueSerializer(jackson2JsonRedisSerializer);  // values as JSON
        redisTemplate.setHashKeySerializer(stringSerializer);           // hash keys as plain strings
        redisTemplate.setHashValueSerializer(jackson2JsonRedisSerializer);  // hash values as JSON
        redisTemplate.afterPropertiesSet();
        return redisTemplate;
    }
}

CacheMessage

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.io.Serializable;

@Data
@NoArgsConstructor
@AllArgsConstructor
public class CacheMessage implements Serializable {
    private static final long serialVersionUID = -3574997636829868400L;

    private String cacheName;
    private CacheMsgType type;  // indicates an update or a delete
    private Object key;
    private Object value;
    private String msgSource;   // identifies the source host, used to avoid re-applying our own messages
}

CacheMsgType

public enum CacheMsgType {
    UPDATE,
    DELETE;
}

MessageSourceUtil

import org.springframework.core.env.Environment;

import java.net.InetAddress;
import java.net.UnknownHostException;

public class MessageSourceUtil {
    public static String getMsgSource() throws UnknownHostException {
        String host = InetAddress.getLocalHost().getHostAddress();
        Environment env = SpringContextUtil.getBean(Environment.class);
        String port = env.getProperty("server.port");
        return host+":"+port;
    }
}

RedisMessageReceiver

import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.example.doublecache.cache.DoubleCache;
import org.example.doublecache.cache.DoubleCacheManager;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;
import java.net.UnknownHostException;

@Slf4j
@Component
@AllArgsConstructor
public class RedisMessageReceiver {
    private final RedisTemplate redisTemplate;
    private final DoubleCacheManager manager;

    // receive the notification and process it
    public void receive(String message) throws UnknownHostException {
        CacheMessage msg = (CacheMessage) redisTemplate
                .getValueSerializer().deserialize(message.getBytes());
        log.info(msg.toString());

        // ignore messages sent by this host
        if (msg.getMsgSource().equals(MessageSourceUtil.getMsgSource())){
            log.info("message sent by this host, ignoring");
            return;
        }

        DoubleCache cache = (DoubleCache) manager.getCache(msg.getCacheName());
        if (msg.getType()== CacheMsgType.UPDATE) {
            cache.updateL1Cache(msg.getKey(),msg.getValue());
            log.info("update local cache");
        }

        if (msg.getType()== CacheMsgType.DELETE) {
            log.info("delete local cache");
            cache.evictL1Cache(msg.getKey());
        }
    }
}

SpringContextUtil

import org.springframework.beans.BeansException;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.stereotype.Component;

@Component
public class SpringContextUtil implements ApplicationContextAware {

    private static ApplicationContext applicationContext;

    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
        this.applicationContext = applicationContext;
    }

    public static ApplicationContext getApplicationContext() {
        return applicationContext;
    }

    public static Object getBean(String name) {
        return applicationContext.getBean(name);
    }

    public static <T> T getBean(Class<T> t) {
        return applicationContext.getBean(t);
    }

}

spring.factories

org.springframework.boot.autoconfigure.EnableAutoConfiguration = \
  org.example.doublecache.config.RedisConfig, \
  org.example.doublecache.config.CacheConfig, \
  org.example.doublecache.config.MessageConfig

pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.example</groupId>
    <artifactId>double-cache</artifactId>
    <version>1.0-SNAPSHOT</version>

    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.7.2</version>
        <relativePath/>
    </parent>

    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
        </dependency>

        <dependency>
            <groupId>com.github.ben-manes.caffeine</groupId>
            <artifactId>caffeine</artifactId>
            <version>2.9.2</version>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-redis</artifactId>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>1.18.16</version>
            <scope>provided</scope>
        </dependency>
    </dependencies>

</project>

6.3 Main Code of the Test Module

pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.example</groupId>
    <artifactId>testcache</artifactId>
    <version>1.0-SNAPSHOT</version>

    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.7.2</version>
        <relativePath/>
    </parent>
    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <mybatis-plus.version>3.3.2</mybatis-plus.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.example</groupId>
            <artifactId>double-cache</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <scope>runtime</scope>
        </dependency>
        <dependency>
            <groupId>com.baomidou</groupId>
            <artifactId>mybatis-plus-boot-starter</artifactId>
            <version>${mybatis-plus.version}</version>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>1.18.16</version>
            <scope>provided</scope>
        </dependency>
    </dependencies>
</project>

application.yml

server:
  port: 8090

spring:
  application:
    name: test-demo
  datasource:
    url: jdbc:mysql://localhost:3306/ktl?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC
    username: root
    password: root
    driver-class-name: com.mysql.cj.jdbc.Driver
    hikari:
      minimum-idle: 5
      connection-test-query: SELECT 1 FROM DUAL
      maximum-pool-size: 20
      auto-commit: true
      idle-timeout: 30000
      pool-name: MyHikariCP
      max-lifetime: 60000
      connection-timeout: 30000
  redis:
    host: 192.168.200.131
    port: 6379
    database: 0
    timeout: 10000ms
    lettuce:
      pool:
        max-active: 8
        max-wait: -1ms
        max-idle: 8
        min-idle: 0
    password: root
doublecache:
  allowNull: true
  init: 128             # Caffeine initial capacity
  max: 1024             # Caffeine maximum size
  expireAfterWrite: 30  # Caffeine expiration (seconds)
  redisExpire: 60       # Redis expiration (seconds)

OrderServiceImpl

@Service @Slf4j
@AllArgsConstructor
public class OrderServiceImpl extends ServiceImpl<OrderMapper,Order> implements OrderService{
    private final OrderMapper orderMapper;

    @Cacheable(value = "order",key = "#id")
    public Order getOrderById(Long id) {
        Order myOrder = orderMapper.selectOne(new LambdaQueryWrapper<Order>()
                .eq(Order::getId, id));
        return myOrder;
    }

    @CachePut(cacheNames = "order",key = "#order.id")
    public Order updateOrder(Order order) {
        orderMapper.updateById(order);
        return order;
    }

    @CacheEvict(cacheNames = "order",key = "#id")
    public void deleteOrder(Long id) {
        orderMapper.deleteById(id);
    }

    // no annotation here: use the get(key, Callable) method directly
    public Order getOrderById2(Long id) {
//        DoubleCacheManager cacheManager = SpringContextUtil.getBean(DoubleCacheManager.class);
//        Cache cache = cacheManager.getCache("order");
//        Order order =(Order) cache.get(id, (Callable<Object>) () -> {
//            log.info("get data from database");
//            Order myOrder = orderMapper.selectOne(new LambdaQueryWrapper<Order>()
//                    .eq(Order::getId, id));
//            return myOrder;
//        });
        return null;
    }
}

6.4 Test Results

In the screenshots, the port inside CacheMessage is always 8090 because that is the port this instance actually occupies.

Start three instances of the test module (on ports 8090, 8091 and 8092) by overriding the port in each run configuration's VM options, e.g. -Dserver.port=8091.
Send a request: http://127.0.0.1:8090/get/2

(screenshot: log output of the 8090 instance)

Within 30 seconds, send http://127.0.0.1:8091/get/2; the instance on port 8091 now also holds the entry in its local cache.
(screenshot: log output of the 8091 instance showing a local cache hit)

Test update: within 30 seconds, send a request to 127.0.0.1:8092/update with the body

{
    "id": 2,
    "orderNumber": "10002",
    "money": 99.0,
    "status": 1
}

(screenshot: log output after the update request)
The other two instances both print the "update local cache" log.

Test delete: send 127.0.0.1:8090/del?id=2.
The instance on port 8091 logs the following:
(screenshot: log output of the 8091 instance after the delete)
Then fetch the order again through 8090:
(screenshot: result of the follow-up get via 8090)

And that's it.


