Unverified commit ec5239eb, authored by 梦境迷离, committed by GitHub

add benchmark (#163)

* add benchmark
Parent 0b1b7010
...@@ -104,6 +104,18 @@ lazy val `cacheable-redis` = (project in file("cacheable-redis"))
  .settings(paradise())
  .enablePlugins(HeaderPlugin)

lazy val `cacheable-benchmark` = (project in file("cacheable-benchmark"))
  .settings(commonSettings)
  .settings(
    name := "smt-cacheable-benchmark",
    publish / skip := true,
    excludeDependencies ++= Seq(
      InclExclRule("com.google.protobuf")
    )
  ).dependsOn(`cacheable-core`, `cacheable-redis`, `cacheable-caffeine`)
  .settings(paradise())
  .enablePlugins(HeaderPlugin, JmhPlugin)
lazy val tools = (project in file("tools"))
  .settings(commonSettings)
  .settings(
...@@ -128,7 +140,7 @@ lazy val tools = (project in file("tools"))
  .settings(paradise())
  .enablePlugins(HeaderPlugin, ProtocPlugin)

lazy val root = (project in file(".")).aggregate(tools, `cacheable-core`, `cacheable-redis`, `cacheable-caffeine`, `cacheable-benchmark`)
  .settings(
    publishArtifact := false,
    publish / skip := true,
...
**Environment**
```
[info] # JMH version: 1.32
[info] # VM version: JDK 17.0.1, OpenJDK 64-Bit Server VM, 17.0.1+0
[info] # VM invoker: /usr/local/Cellar/openjdk/17.0.1/libexec/openjdk.jdk/Contents/Home/bin/java
```
**Benchmarked method**
```scala
ZIO.effect {
Try(Thread.sleep(5)).getOrElse(())
Random.nextInt() + ""
}
```
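
In the benchmark class shown further down, this body is wrapped in `@cacheable`-annotated methods (plus one un-annotated baseline). For example, the Redis-backed variant looks like this:
```scala
@cacheable(local = false) // use RedisExecutor.live, not RedisExecutor.local
@inline def cacheableRedis(id: Int, key: String): ZIO[Any, Throwable, String] =
  ZIO.effect {
    Try(Thread.sleep(5)).getOrElse(()) // simulate a ~5 ms JDBC request
    Random.nextInt() + ""
  }
```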
**JMH configuration**
```
@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
@Measurement(iterations = 5)
@Warmup(iterations = 5)
@Fork(3)
```
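
With 5 measurement iterations per fork and 3 forks, each benchmark is sampled 15 times, which matches the `Cnt` value of 15 in the results below.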
**Caffeine configuration**
```
caffeine {
maximumSize = 100
expireAfterWriteSeconds = 60
disabledLog = true
}
```
**Redis configuration**
```
redis {
host = "0.0.0.0"
port = 6379
disabledLog = true
}
```
**Results**
```
[info] Benchmark (limitRandNum) Mode Cnt Score Error Units
[info] CacheableBenchmarks.benchmarkCaffeineCache 2 thrpt 15 668084.614 ± 20389.518 ops/s
[info] CacheableBenchmarks.benchmarkNoCache 2 thrpt 15 164.198 ± 1.656 ops/s
[info] CacheableBenchmarks.benchmarkRedisCache 2 thrpt 15 663.941 ± 316.381 ops/s
```
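
The benchmarks live in the new `cacheable-benchmark` module and use the sbt-jmh plugin added in this commit; assuming its default task names, they can be run with a command along the lines of `sbt "cacheable-benchmark / Jmh / run"`.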
/*
* Copyright (c) 2022 bitlap
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.bitlap.cacheable.benchmark
import zio.{ BootstrapRuntime, ZIO }
import zio.internal.Platform
/**
* runtime
*
* @author 梦境迷离
* @version 1.0,2022/3/22
*/
trait BenchmarkRuntime extends BootstrapRuntime {

  // platform tuned for benchmarking (fiber tracing disabled), so runtime overhead stays out of the measurement
  override val platform: Platform = Platform.benchmark

  // run the effect synchronously and swallow any failure, so JMH measures only the call itself
  final def execute[T](query: ZIO[Any, Throwable, T]): Unit =
    unsafeRun(query.catchAll(_ => ZIO.effect(())))
}
/*
* Copyright (c) 2022 bitlap
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.bitlap.cacheable.benchmark
import scala.util.Try
import org.bitlap.cacheable.core.cacheable
import org.openjdk.jmh.annotations._
import zio.ZIO
import java.util.concurrent.TimeUnit
import scala.util.Random
/**
* benchmark @cacheable
*
* @author 梦境迷离
* @version 1.0,2022/3/22
*/
@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
@Measurement(iterations = 5)
@Warmup(iterations = 5)
@Fork(3)
class CacheableBenchmarks extends BenchmarkRuntime {

  // limit the argument space so the cache is only hit some of the time;
  // smaller numbers make cache hits more likely (~25% hit rate here)
  @Param(Array("2"))
  var limitRandNum: Int = _

  @Benchmark
  def benchmarkRedisCache(): Unit = {
    execute[String](cacheableRedis(Random.nextInt(limitRandNum), Random.nextInt(limitRandNum) + ""))
  }

  @Benchmark
  def benchmarkCaffeineCache(): Unit = {
    execute[String](cacheableCaffeine(Random.nextInt(limitRandNum), Random.nextInt(limitRandNum) + ""))
  }

  @Benchmark
  def benchmarkNoCache(): Unit = {
    execute[String](unCacheable(Random.nextInt(limitRandNum), Random.nextInt(limitRandNum) + ""))
  }

  @cacheable(local = false) // use RedisExecutor.live, not RedisExecutor.local
  @inline def cacheableRedis(id: Int, key: String): ZIO[Any, Throwable, String] = {
    ZIO.effect {
      Try(Thread.sleep(5)).getOrElse(()) // simulate a JDBC request
      Random.nextInt() + ""
    }
  }

  @cacheable(local = true)
  @inline def cacheableCaffeine(id: Int, key: String): ZIO[Any, Throwable, String] = {
    ZIO.effect {
      Try(Thread.sleep(5)).getOrElse(())
      Random.nextInt() + ""
    }
  }

  @inline def unCacheable(id: Int, key: String): ZIO[Any, Throwable, String] = {
    ZIO.effect {
      Try(Thread.sleep(5)).getOrElse(())
      Random.nextInt() + ""
    }
  }
}
caffeine {
  maximumSize = 100
  expireAfterWriteSeconds = 60
  disabledLog = true
}
...@@ -37,7 +37,7 @@ object Implicits {
    override def evict(business: => ZStream[Any, Throwable, T])(identities: List[String]): ZStream[Any, Throwable, T] = {
      for {
        updateResult <- ZStream.fromIterable(identities).map(key => ZCaffeine.del(key)) *> business
        _ <- if (ZCaffeine.disabledLog) ZStream.unit else LogUtils.debugS(s"Caffeine ZStream update: identities:[$identities], updateResult:[$updateResult]")
      } yield updateResult
    }
  }
...@@ -48,9 +48,9 @@ object Implicits {
      val field = cacheField(args)
      for {
        cacheValue <- ZStream.fromEffect(ZCaffeine.hGet[T](key, field))
        _ <- if (ZCaffeine.disabledLog) ZStream.unit else LogUtils.debugS(s"Caffeine ZStream getIfPresent: identity:[$key],field:[$field],cacheValue:[$cacheValue]")
        result <- cacheValue.fold(business.mapM(r => ZCaffeine.hSet(key, field, r).as(r)))(value => ZStream.fromEffect(ZIO.effectTotal(value)))
        _ <- if (ZCaffeine.disabledLog) ZStream.unit else LogUtils.debugS(s"Caffeine ZStream getIfPresent: identity:[$key],field:[$field],result:[$result]")
      } yield result
    }
  }
...@@ -59,7 +59,7 @@ object Implicits {
    override def evict(business: => ZIO[Any, Throwable, T])(identities: List[String]): ZIO[Any, Throwable, T] = {
      for {
        updateResult <- ZIO.foreach_(identities)(key => ZCaffeine.del(key)) *> business
        _ <- LogUtils.debug(s"Caffeine ZIO update: identities:[$identities], updateResult:[$updateResult]").unless(ZCaffeine.disabledLog)
      } yield updateResult
    }
  }
...@@ -70,9 +70,9 @@ object Implicits {
      val field = cacheField(args)
      for {
        cacheValue <- ZCaffeine.hGet[T](key, field)
        _ <- LogUtils.debug(s"Caffeine ZIO getIfPresent: identity:[$key], field:[$field], cacheValue:[$cacheValue]").unless(ZCaffeine.disabledLog)
        result <- cacheValue.fold(business.tap(r => ZCaffeine.hSet(key, field, r).as(r)))(value => ZIO.effectTotal(value))
        _ <- LogUtils.debug(s"Caffeine ZIO getIfPresent: identity:[$key], field:[$field], result:[$result]").unless(ZCaffeine.disabledLog)
      } yield result
    }
  }
...
...@@ -40,6 +40,8 @@ object ZCaffeine {
  private val conf: Config = ConfigFactory.load("reference.conf")
  private val custom: Config = ConfigFactory.load("application.conf").withFallback(conf)

  lazy val disabledLog: Boolean = custom.getBoolean("caffeine.disabledLog")

  private lazy val maximumSize = custom.getInt("caffeine.maximumSize")
  private lazy val expireAfterWriteSeconds = custom.getInt("caffeine.expireAfterWriteSeconds")
...@@ -92,9 +94,8 @@ object ZCaffeine {
        chm.put(field, value)
        hashCache.put(key, chm)
      } else {
        hashMap.put(field, value)
        hashCache.put(key, new ConcurrentHashMap(hashMap))
      }
    }
  }
...
redis {
  host = "0.0.0.0"
  port = 6379
  disabledLog = true
}
...@@ -38,7 +38,7 @@ object Implicits {
    override def evict(business: => ZStream[Any, Throwable, T])(identities: List[String]): ZStream[Any, Throwable, T] = {
      for {
        updateResult <- ZStream.fromIterable(identities).map(key => ZRedisService.del(key)) *> business
        _ <- if (ZRedisConfiguration.disabledLog) ZStream.unit else LogUtils.debugS(s"Redis ZStream update: identities:[$identities], updateResult:[$updateResult]")
      } yield updateResult
    }
  }
...@@ -49,9 +49,9 @@ object Implicits {
      val field = cacheField(args)
      for {
        cacheValue <- ZStream.fromEffect(ZRedisService.hGet[T](key, field))
        _ <- if (ZRedisConfiguration.disabledLog) ZStream.unit else LogUtils.debugS(s"Redis ZStream getIfPresent: identity:[$key],field:[$field],cacheValue:[$cacheValue]")
        result <- cacheValue.fold(business.mapM(r => ZRedisService.hSet[T](key, field, r).as(r)))(value => ZStream.fromEffect(ZIO.effectTotal(value)))
        _ <- if (ZRedisConfiguration.disabledLog) ZStream.unit else LogUtils.debugS(s"Redis ZStream getIfPresent: identity:[$key],field:[$field],result:[$result]")
      } yield result
    }
  }
...@@ -60,7 +60,7 @@ object Implicits {
    override def evict(business: => ZIO[Any, Throwable, T])(identities: List[String]): ZIO[Any, Throwable, T] = {
      for {
        updateResult <- ZIO.foreach_(identities)(key => ZRedisService.del(key)) *> business
        _ <- LogUtils.debug(s"Redis ZIO update: identities:[$identities], updateResult:[$updateResult]").unless(ZRedisConfiguration.disabledLog)
      } yield updateResult
    }
  }
...@@ -71,9 +71,9 @@ object Implicits {
      val field = cacheField(args)
      for {
        cacheValue <- ZRedisService.hGet[T](key, field)
        _ <- LogUtils.debug(s"Redis ZIO getIfPresent: identity:[$key], field:[$field], cacheValue:[$cacheValue]").unless(ZRedisConfiguration.disabledLog)
        result <- cacheValue.fold(business.tap(r => ZRedisService.hSet[T](key, field, r).as(r)))(value => ZIO.effectTotal(value))
        _ <- LogUtils.debug(s"Redis ZIO getIfPresent: identity:[$key], field:[$field], result:[$result]").unless(ZRedisConfiguration.disabledLog)
      } yield result
    }
  }
...
...@@ -26,6 +26,7 @@ import org.bitlap.cacheable.core.LogUtils
import zio.redis.{ Redis, RedisConfig, RedisError, RedisExecutor }
import zio.schema.codec.{ Codec, ProtobufCodec }
import zio.{ Has, Layer, ULayer, ZLayer }
import zio.logging.Logging

/**
 * redis configuration
...@@ -36,14 +37,16 @@ import zio.{ Has, Layer, ULayer, ZLayer }
 */
object ZRedisConfiguration {

  private lazy val conf: Config = ConfigFactory.load("reference.conf")
  private lazy val custom: Config = ConfigFactory.load("application.conf").withFallback(conf)
  private lazy val redisConf: RedisConfig = RedisConfig(custom.getString("redis.host"), custom.getInt("redis.port"))

  lazy val disabledLog: Boolean = custom.getBoolean("redis.disabledLog")

  private val codec: ULayer[Has[Codec]] = ZLayer.succeed[Codec](ProtobufCodec)

  lazy val redisLayer: Layer[RedisError.IOError, ZRedisCacheService] =
    (((if (disabledLog) Logging.ignore else LogUtils.logLayer) ++ ZLayer.succeed(redisConf)) >>>
      RedisExecutor.live ++ ZRedisConfiguration.codec) >>>
      (Redis.live >>> (r => ZRedisLive(r)).toLayer)
}
...@@ -4,4 +4,5 @@ addSbtPlugin("com.github.sbt" % "sbt-pgp" % "2.1.2")
addSbtPlugin("com.github.gseitz" % "sbt-release" % "1.0.13")
addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.6.5")
addSbtPlugin("org.scoverage" % "sbt-scoverage" % "1.9.3")
addSbtPlugin("org.jmotor.sbt" % "sbt-protoc" % "1.0.19")
addSbtPlugin("pl.project13.scala" % "sbt-jmh" % "0.4.3")