Mirror of https://github.com/openimsdk/open-im-server.git (synced 2025-10-25 20:52:11 +08:00)
Commit a8891784dc: Merge remote-tracking branch 'upstream/main'
@@ -1,7 +1,6 @@
address: [ localhost:16379 ]
username: ''
password: openIM123
enablePipeline: false
clusterMode: false
db: 0
maxRetry: 10
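For context, a minimal sketch of how a Redis config with these keys might be consumed with github.com/redis/go-redis/v9 (already a dependency of this repo); the RedisConf struct and newRedisClient function are illustrative stand-ins, not the project's actual types.

package redisconf

import (
	"context"

	"github.com/redis/go-redis/v9"
)

// RedisConf mirrors the YAML keys above; illustrative only.
type RedisConf struct {
	Address     []string
	Username    string
	Password    string
	ClusterMode bool
	DB          int
	MaxRetry    int
}

// newRedisClient shows one plausible way to turn such a config into a client:
// cluster vs. single-node, credentials, retry count, and a Ping to fail fast.
func newRedisClient(ctx context.Context, c RedisConf) (redis.UniversalClient, error) {
	var cli redis.UniversalClient
	if c.ClusterMode {
		cli = redis.NewClusterClient(&redis.ClusterOptions{
			Addrs:      c.Address,
			Username:   c.Username,
			Password:   c.Password,
			MaxRetries: c.MaxRetry,
		})
	} else {
		cli = redis.NewClient(&redis.Options{
			Addr:       c.Address[0],
			Username:   c.Username,
			Password:   c.Password,
			DB:         c.DB,
			MaxRetries: c.MaxRetry,
		})
	}
	// Fail fast if the server is unreachable or the credentials are wrong.
	if err := cli.Ping(ctx).Err(); err != nil {
		return nil, err
	}
	return cli, nil
}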
go.mod (5 lines changed)
@@ -7,7 +7,7 @@ require (
github.com/dtm-labs/rockscache v0.1.1
github.com/gin-gonic/gin v1.9.1
github.com/go-playground/validator/v10 v10.18.0
github.com/gogo/protobuf v1.3.2
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/gorilla/websocket v1.5.1
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
@@ -31,6 +31,7 @@ require (
github.com/IBM/sarama v1.43.0
github.com/fatih/color v1.14.1
github.com/go-redis/redis v6.15.9+incompatible
github.com/go-redis/redismock/v9 v9.2.0
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/kelindar/bitmap v1.5.2
github.com/likexian/gokit v0.25.13
@@ -112,8 +113,6 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect
github.com/mozillazg/go-httpheader v0.4.0 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/onsi/gomega v1.18.1 // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
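go.mod now lists github.com/go-redis/redismock/v9 as a direct dependency. A minimal sketch of the typical redismock/v9 test pattern follows; the test name, key, and value are illustrative, not taken from this repository.

package cache_test

import (
	"context"
	"testing"

	"github.com/go-redis/redismock/v9"
)

// TestGetSeq shows the usual redismock flow: the mock stands in for a real
// Redis server, expectations are declared up front, and the code under test
// runs against the returned *redis.Client.
func TestGetSeq(t *testing.T) {
	db, mock := redismock.NewClientMock()
	mock.ExpectGet("conversation:seq").SetVal("42")

	val, err := db.Get(context.Background(), "conversation:seq").Result()
	if err != nil || val != "42" {
		t.Fatalf("unexpected result: %v, %v", val, err)
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Fatal(err)
	}
}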
go.sum (42 lines changed: dependency checksum entries added and removed to match the go.mod changes above)
@@ -48,10 +48,6 @@ func Start(ctx context.Context, index int, config *Config) error {
if err != nil {
return err
}
prometheusPort, err := datautil.GetElemByIndex(config.API.Prometheus.Ports, index)
if err != nil {
return err
}

var client discovery.SvcDiscoveryRegistry

@@ -62,13 +58,20 @@ func Start(ctx context.Context, index int, config *Config) error {
}

var (
netDone = make(chan struct{}, 1)
netErr error
netDone        = make(chan struct{}, 1)
netErr         error
prometheusPort int
)

router := newGinRouter(client, config)
if config.API.Prometheus.Enable {
go func() {
prometheusPort, err = datautil.GetElemByIndex(config.API.Prometheus.Ports, index)
if err != nil {
netErr = err
netDone <- struct{}{}
return
}
p := ginprom.NewPrometheus("app", prommetrics.GetGinCusMetrics("Api"))
p.SetListenAddress(fmt.Sprintf(":%d", prometheusPort))
if err = p.Use(router); err != nil && err != http.ErrServerClosed {
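The hunk above moves the Prometheus port lookup inside the goroutine, so it only runs when config.API.Prometheus.Enable is set and its failure is reported over netDone/netErr rather than aborting Start. A generic sketch of that pattern, with illustrative names (ports, index, serve) rather than the project's actual API:

package apiutil

import (
	"fmt"
	"net/http"
)

// startOptionalMetrics resolves the port and runs the listener inside the
// goroutine; any failure is surfaced on the returned channel instead of
// blocking or aborting the caller's startup path.
func startOptionalMetrics(enable bool, ports []int, index int, serve func(port int) error) <-chan error {
	errCh := make(chan error, 1)
	if !enable {
		return errCh
	}
	go func() {
		if index < 0 || index >= len(ports) {
			errCh <- fmt.Errorf("no metrics port configured for instance %d", index)
			return
		}
		if err := serve(ports[index]); err != nil && err != http.ErrServerClosed {
			errCh <- fmt.Errorf("metrics listener on :%d failed: %w", ports[index], err)
		}
	}()
	return errCh
}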
@@ -47,7 +47,6 @@ func (s *Server) Start(ctx context.Context, index int, conf *Config) error {

type Server struct {
rpcPort int
prometheusPort int
LongConnServer LongConnServer
config *Config
pushTerminal map[int]struct{}
@@ -57,10 +56,9 @@ func (s *Server) SetLongConnServer(LongConnServer LongConnServer) {
s.LongConnServer = LongConnServer
}

func NewServer(rpcPort int, proPort int, longConnServer LongConnServer, conf *Config) *Server {
func NewServer(rpcPort int, longConnServer LongConnServer, conf *Config) *Server {
s := &Server{
rpcPort: rpcPort,
prometheusPort: proPort,
LongConnServer: longConnServer,
pushTerminal: make(map[int]struct{}),
config: conf,

@@ -38,10 +38,6 @@ func Start(ctx context.Context, index int, conf *Config) error {
if err != nil {
return err
}
prometheusPort, err := datautil.GetElemByIndex(conf.MsgGateway.Prometheus.Ports, index)
if err != nil {
return err
}
rpcPort, err := datautil.GetElemByIndex(conf.MsgGateway.RPC.Ports, index)
if err != nil {
return err
@@ -57,7 +53,7 @@ func Start(ctx context.Context, index int, conf *Config) error {
return err
}

hubServer := NewServer(rpcPort, prometheusPort, longServer, conf)
hubServer := NewServer(rpcPort, longServer, conf)
netDone := make(chan error)
go func() {
err = hubServer.Start(ctx, index, conf)
@@ -17,6 +17,8 @@ package msgtransfer
import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/utils/datautil"
@@ -26,11 +28,9 @@ import (
"syscall"

"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/mgo"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
@@ -44,15 +44,14 @@ import (
)

type MsgTransfer struct {
// This consumer aggregated messages, subscribed to the topic:ws2ms_chat,
// the modification notification is sent to msg_to_modify topic, the message is stored in redis, Incr Redis,
// and then the message is sent to ms2pschat topic for push, and the message is sent to msg_to_mongo topic for persistence
historyCH *OnlineHistoryRedisConsumerHandler
// This consumer aggregated messages, subscribed to the topic:toRedis,
// the message is stored in redis, Incr Redis, and then the message is sent to toPush topic for push,
// and the message is sent to toMongo topic for persistence
historyCH *OnlineHistoryRedisConsumerHandler
//This consumer handle message to mongo
historyMongoCH *OnlineHistoryMongoConsumerHandler
// mongoDB batch insert, delete messages in redis after success,
// and handle the deletion notification message deleted subscriptions topic: msg_to_mongo
ctx context.Context
cancel context.CancelFunc
ctx    context.Context
cancel context.CancelFunc
}

type Config struct {
@@ -82,9 +81,8 @@ func Start(ctx context.Context, index int, config *Config) error {
}
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
//todo MsgCacheTimeout
msgModel := cache.NewMsgCache(rdb, config.RedisConfig.EnablePipeline)
seqModel := cache.NewSeqCache(rdb)
msgModel := redis.NewMsgCache(rdb)
seqModel := redis.NewSeqCache(rdb)
msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB())
if err != nil {
return err
@@ -95,37 +93,23 @@ func Start(ctx context.Context, index int, config *Config) error {
}
conversationRpcClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)
groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
msgTransfer, err := NewMsgTransfer(&config.KafkaConfig, msgDatabase, &conversationRpcClient, &groupRpcClient)
historyCH, err := NewOnlineHistoryRedisConsumerHandler(&config.KafkaConfig, msgDatabase, &conversationRpcClient, &groupRpcClient)
if err != nil {
return err
}
historyMongoCH, err := NewOnlineHistoryMongoConsumerHandler(&config.KafkaConfig, msgDatabase)
if err != nil {
return err
}
msgTransfer := &MsgTransfer{
historyCH: historyCH,
historyMongoCH: historyMongoCH,
}
return msgTransfer.Start(index, config)
}

func NewMsgTransfer(kafkaConf *config.Kafka, msgDatabase controller.CommonMsgDatabase,
conversationRpcClient *rpcclient.ConversationRpcClient, groupRpcClient *rpcclient.GroupRpcClient) (*MsgTransfer, error) {
historyCH, err := NewOnlineHistoryRedisConsumerHandler(kafkaConf, msgDatabase, conversationRpcClient, groupRpcClient)
if err != nil {
return nil, err
}
historyMongoCH, err := NewOnlineHistoryMongoConsumerHandler(kafkaConf, msgDatabase)
if err != nil {
return nil, err
}

return &MsgTransfer{
historyCH: historyCH,
historyMongoCH: historyMongoCH,
}, nil
}

func (m *MsgTransfer) Start(index int, config *Config) error {
prometheusPort, err := datautil.GetElemByIndex(config.MsgTransfer.Prometheus.Ports, index)
if err != nil {
return err
}
m.ctx, m.cancel = context.WithCancel(context.Background())

var (
netDone = make(chan struct{}, 1)
netErr error
@@ -133,16 +117,26 @@ func (m *MsgTransfer) Start(index int, config *Config) error {

go m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(m.ctx, m.historyCH)
go m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(m.ctx, m.historyMongoCH)
err := m.historyCH.redisMessageBatches.Start()
if err != nil {
return err
}

if config.MsgTransfer.Prometheus.Enable {
go func() {
prometheusPort, err := datautil.GetElemByIndex(config.MsgTransfer.Prometheus.Ports, index)
if err != nil {
netErr = err
netDone <- struct{}{}
return
}
proreg := prometheus.NewRegistry()
proreg.MustRegister(
collectors.NewGoCollector(),
)
proreg.MustRegister(prommetrics.GetGrpcCusMetrics("Transfer", &config.Share)...)
http.Handle("/metrics", promhttp.HandlerFor(proreg, promhttp.HandlerOpts{Registry: proreg}))
err := http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil)
err = http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil)
if err != nil && err != http.ErrServerClosed {
netErr = errs.WrapMsg(err, "prometheus start error", "prometheusPort", prometheusPort)
netDone <- struct{}{}
@@ -157,11 +151,13 @@ func (m *MsgTransfer) Start(index int, config *Config) error {
program.SIGTERMExit()
// graceful close kafka client.
m.cancel()
m.historyCH.redisMessageBatches.Close()
m.historyCH.historyConsumerGroup.Close()
m.historyMongoCH.historyConsumerGroup.Close()
return nil
case <-netDone:
m.cancel()
m.historyCH.redisMessageBatches.Close()
m.historyCH.historyConsumerGroup.Close()
m.historyMongoCH.historyConsumerGroup.Close()
close(netDone)
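The metrics hunk above uses the standard client_golang pattern: a dedicated registry with the Go runtime collector plus service-specific collectors, served via promhttp. A self-contained sketch of that pattern, with an illustrative package name and function signature:

package metrics

import (
	"fmt"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Serve exposes a dedicated registry on /metrics: a fresh registry keeps the
// endpoint limited to exactly the collectors registered here.
func Serve(port int, extra ...prometheus.Collector) error {
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewGoCollector())
	reg.MustRegister(extra...)

	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
	return http.ListenAndServe(fmt.Sprintf(":%d", port), mux)
}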
@@ -16,51 +16,34 @@ package msgtransfer

import (
"context"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/IBM/sarama"
"github.com/go-redis/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/open-im-server/v3/pkg/tools/batcher"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/idutil"
"github.com/openimsdk/tools/utils/stringutil"
"google.golang.org/protobuf/proto"
"strconv"
"strings"
"time"
)

const (
ConsumerMsgs = 3
SourceMessages = 4
MongoMessages = 5
ChannelNum = 100
size = 500
mainDataBuffer = 500
subChanBuffer = 50
worker = 50
interval = 100 * time.Millisecond
)

type MsgChannelValue struct {
uniqueKey string
ctx context.Context
ctxMsgList []*ContextMsg
}

type TriggerChannelValue struct {
ctx context.Context
cMsgList []*sarama.ConsumerMessage
}

type Cmd2Value struct {
Cmd int
Value any
}
type ContextMsg struct {
message *sdkws.MsgData
ctx context.Context
@@ -68,13 +51,8 @@ type ContextMsg struct {

type OnlineHistoryRedisConsumerHandler struct {
historyConsumerGroup *kafka.MConsumerGroup
chArrays [ChannelNum]chan Cmd2Value
msgDistributionCh chan Cmd2Value

// singleMsgSuccessCount uint64
// singleMsgFailedCount uint64
// singleMsgSuccessCountMutex sync.Mutex
// singleMsgFailedCountMutex sync.Mutex
redisMessageBatches *batcher.Batcher[sarama.ConsumerMessage]

msgDatabase controller.CommonMsgDatabase
conversationRpcClient *rpcclient.ConversationRpcClient
@@ -83,89 +61,82 @@ type OnlineHistoryRedisConsumerHandler struct {

func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database controller.CommonMsgDatabase,
conversationRpcClient *rpcclient.ConversationRpcClient, groupRpcClient *rpcclient.GroupRpcClient) (*OnlineHistoryRedisConsumerHandler, error) {
historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToRedisGroupID, []string{kafkaConf.ToRedisTopic}, true)
historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToRedisGroupID, []string{kafkaConf.ToRedisTopic}, false)
if err != nil {
return nil, err
}
var och OnlineHistoryRedisConsumerHandler
och.msgDatabase = database
och.msgDistributionCh = make(chan Cmd2Value) // no buffer channel
go och.MessagesDistributionHandle()
for i := 0; i < ChannelNum; i++ {
och.chArrays[i] = make(chan Cmd2Value, 50)
go och.Run(i)

b := batcher.New[sarama.ConsumerMessage](
batcher.WithSize(size),
batcher.WithWorker(worker),
batcher.WithInterval(interval),
batcher.WithDataBuffer(mainDataBuffer),
batcher.WithSyncWait(true),
batcher.WithBuffer(subChanBuffer),
)
b.Sharding = func(key string) int {
hashCode := stringutil.GetHashCode(key)
return int(hashCode) % och.redisMessageBatches.Worker()
}
b.Key = func(consumerMessage *sarama.ConsumerMessage) string {
return string(consumerMessage.Key)
}
b.Do = och.do
och.redisMessageBatches = b
och.conversationRpcClient = conversationRpcClient
och.groupRpcClient = groupRpcClient
och.historyConsumerGroup = historyConsumerGroup
return &och, err
}
func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID int, val *batcher.Msg[sarama.ConsumerMessage]) {
ctx = mcontext.WithTriggerIDContext(ctx, val.TriggerID())
ctxMessages := och.parseConsumerMessages(ctx, val.Val())
ctx = withAggregationCtx(ctx, ctxMessages)
log.ZInfo(ctx, "msg arrived channel", "channel id", channelID, "msgList length", len(ctxMessages),
"key", val.Key())

func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) {
for cmd := range och.chArrays[channelID] {
switch cmd.Cmd {
case SourceMessages:
msgChannelValue := cmd.Value.(MsgChannelValue)
ctxMsgList := msgChannelValue.ctxMsgList
ctx := msgChannelValue.ctx
log.ZDebug(
ctx,
"msg arrived channel",
"channel id",
channelID,
"msgList length",
len(ctxMsgList),
"uniqueKey",
msgChannelValue.uniqueKey,
)
storageMsgList, notStorageMsgList, storageNotificationList, notStorageNotificationList, modifyMsgList := och.getPushStorageMsgList(
ctxMsgList,
)
log.ZDebug(
ctx,
"msg lens",
"storageMsgList",
len(storageMsgList),
"notStorageMsgList",
len(notStorageMsgList),
"storageNotificationList",
len(storageNotificationList),
"notStorageNotificationList",
len(notStorageNotificationList),
"modifyMsgList",
len(modifyMsgList),
)
conversationIDMsg := msgprocessor.GetChatConversationIDByMsg(ctxMsgList[0].message)
conversationIDNotification := msgprocessor.GetNotificationConversationIDByMsg(ctxMsgList[0].message)
och.handleMsg(ctx, msgChannelValue.uniqueKey, conversationIDMsg, storageMsgList, notStorageMsgList)
och.handleNotification(
ctx,
msgChannelValue.uniqueKey,
conversationIDNotification,
storageNotificationList,
notStorageNotificationList,
)
if err := och.msgDatabase.MsgToModifyMQ(ctx, msgChannelValue.uniqueKey, conversationIDNotification, modifyMsgList); err != nil {
log.ZError(ctx, "msg to modify mq error", err, "uniqueKey", msgChannelValue.uniqueKey, "modifyMsgList", modifyMsgList)
}
storageMsgList, notStorageMsgList, storageNotificationList, notStorageNotificationList :=
och.categorizeMessageLists(ctxMessages)
log.ZDebug(ctx, "number of categorized messages", "storageMsgList", len(storageMsgList), "notStorageMsgList",
len(notStorageMsgList), "storageNotificationList", len(storageNotificationList), "notStorageNotificationList",
len(notStorageNotificationList))

conversationIDMsg := msgprocessor.GetChatConversationIDByMsg(ctxMessages[0].message)
conversationIDNotification := msgprocessor.GetNotificationConversationIDByMsg(ctxMessages[0].message)
och.handleMsg(ctx, val.Key(), conversationIDMsg, storageMsgList, notStorageMsgList)
och.handleNotification(ctx, val.Key(), conversationIDNotification, storageNotificationList, notStorageNotificationList)
}

func (och *OnlineHistoryRedisConsumerHandler) parseConsumerMessages(ctx context.Context, consumerMessages []*sarama.ConsumerMessage) []*ContextMsg {
var ctxMessages []*ContextMsg
for i := 0; i < len(consumerMessages); i++ {
ctxMsg := &ContextMsg{}
msgFromMQ := &sdkws.MsgData{}
err := proto.Unmarshal(consumerMessages[i].Value, msgFromMQ)
if err != nil {
log.ZWarn(ctx, "msg_transfer Unmarshal msg err", err, string(consumerMessages[i].Value))
continue
}
var arr []string
for i, header := range consumerMessages[i].Headers {
arr = append(arr, strconv.Itoa(i), string(header.Key), string(header.Value))
}
log.ZDebug(ctx, "consumer.kafka.GetContextWithMQHeader", "len", len(consumerMessages[i].Headers),
"header", strings.Join(arr, ", "))
ctxMsg.ctx = kafka.GetContextWithMQHeader(consumerMessages[i].Headers)
ctxMsg.message = msgFromMQ
log.ZDebug(ctx, "message parse finish", "message", msgFromMQ, "key",
string(consumerMessages[i].Key))
ctxMessages = append(ctxMessages, ctxMsg)
}
return ctxMessages
}

// Get messages/notifications stored message list, not stored and pushed message list.
func (och *OnlineHistoryRedisConsumerHandler) getPushStorageMsgList(
totalMsgs []*ContextMsg,
) (storageMsgList, notStorageMsgList, storageNotificatoinList, notStorageNotificationList, modifyMsgList []*sdkws.MsgData) {
isStorage := func(msg *sdkws.MsgData) bool {
options2 := msgprocessor.Options(msg.Options)
if options2.IsHistory() {
return true
}
// if !(!options2.IsSenderSync() && conversationID == msg.MsgData.SendID) {
// return false
// }
return false
}
func (och *OnlineHistoryRedisConsumerHandler) categorizeMessageLists(totalMsgs []*ContextMsg) (storageMsgList,
notStorageMsgList, storageNotificationList, notStorageNotificationList []*ContextMsg) {
for _, v := range totalMsgs {
options := msgprocessor.Options(v.message.Options)
if !options.IsNotNotification() {
@@ -176,190 +147,115 @@ func (och *OnlineHistoryRedisConsumerHandler) getPushStorageMsgList(
if v.message.Options != nil {
msg.Options = msgprocessor.NewMsgOptions()
}
if options.IsOfflinePush() {
v.message.Options = msgprocessor.WithOptions(
v.message.Options,
msgprocessor.WithOfflinePush(false),
)
msg.Options = msgprocessor.WithOptions(msg.Options, msgprocessor.WithOfflinePush(true))
msg.Options = msgprocessor.WithOptions(msg.Options,
msgprocessor.WithOfflinePush(options.IsOfflinePush()),
msgprocessor.WithUnreadCount(options.IsUnreadCount()),
)
v.message.Options = msgprocessor.WithOptions(
v.message.Options,
msgprocessor.WithOfflinePush(false),
msgprocessor.WithUnreadCount(false),
)
ctxMsg := &ContextMsg{
message: msg,
ctx: v.ctx,
}
if options.IsUnreadCount() {
v.message.Options = msgprocessor.WithOptions(
v.message.Options,
msgprocessor.WithUnreadCount(false),
)
msg.Options = msgprocessor.WithOptions(msg.Options, msgprocessor.WithUnreadCount(true))
}
storageMsgList = append(storageMsgList, msg)
storageMsgList = append(storageMsgList, ctxMsg)
}
if isStorage(v.message) {
storageNotificatoinList = append(storageNotificatoinList, v.message)
if options.IsHistory() {
storageNotificationList = append(storageNotificationList, v)
} else {
notStorageNotificationList = append(notStorageNotificationList, v.message)
notStorageNotificationList = append(notStorageNotificationList, v)
}
} else {
if isStorage(v.message) {
storageMsgList = append(storageMsgList, v.message)
if options.IsHistory() {
storageMsgList = append(storageMsgList, v)
} else {
notStorageMsgList = append(notStorageMsgList, v.message)
notStorageMsgList = append(notStorageMsgList, v)
}
}
if v.message.ContentType == constant.ReactionMessageModifier ||
v.message.ContentType == constant.ReactionMessageDeleter {
modifyMsgList = append(modifyMsgList, v.message)
}
}
return
}

func (och *OnlineHistoryRedisConsumerHandler) handleNotification(
ctx context.Context,
key, conversationID string,
storageList, notStorageList []*sdkws.MsgData,
) {
func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key, conversationID string, storageList, notStorageList []*ContextMsg) {
och.toPushTopic(ctx, key, conversationID, notStorageList)
if len(storageList) > 0 {
lastSeq, _, err := och.msgDatabase.BatchInsertChat2Cache(ctx, conversationID, storageList)
if err != nil {
log.ZError(
ctx,
"notification batch insert to redis error",
err,
"conversationID",
conversationID,
"storageList",
storageList,
)
return
}
log.ZDebug(ctx, "success to next topic", "conversationID", conversationID)
err = och.msgDatabase.MsgToMongoMQ(ctx, key, conversationID, storageList, lastSeq)
if err != nil {
log.ZError(ctx, "MsgToMongoMQ error", err)
}
och.toPushTopic(ctx, key, conversationID, storageList)
var storageMessageList []*sdkws.MsgData
for _, msg := range storageList {
storageMessageList = append(storageMessageList, msg.message)
}
}

func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(ctx context.Context, key, conversationID string, msgs []*sdkws.MsgData) {
for _, v := range msgs {
och.msgDatabase.MsgToPushMQ(ctx, key, conversationID, v) // nolint: errcheck
}
}

func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key, conversationID string, storageList, notStorageList []*sdkws.MsgData) {
och.toPushTopic(ctx, key, conversationID, notStorageList)
if len(storageList) > 0 {
lastSeq, isNewConversation, err := och.msgDatabase.BatchInsertChat2Cache(ctx, conversationID, storageList)
if len(storageMessageList) > 0 {
msg := storageMessageList[0]
lastSeq, isNewConversation, err := och.msgDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList)
if err != nil && errs.Unwrap(err) != redis.Nil {
log.ZError(ctx, "batch data insert to redis err", err, "storageMsgList", storageList)
log.ZError(ctx, "batch data insert to redis err", err, "storageMsgList", storageMessageList)
return
}
if isNewConversation {
switch storageList[0].SessionType {
switch msg.SessionType {
case constant.ReadGroupChatType:
log.ZInfo(ctx, "group chat first create conversation", "conversationID",
conversationID)
userIDs, err := och.groupRpcClient.GetGroupMemberIDs(ctx, storageList[0].GroupID)
userIDs, err := och.groupRpcClient.GetGroupMemberIDs(ctx, msg.GroupID)
if err != nil {
log.ZWarn(ctx, "get group member ids error", err, "conversationID",
conversationID)
} else {
if err := och.conversationRpcClient.GroupChatFirstCreateConversation(ctx,
storageList[0].GroupID, userIDs); err != nil {
msg.GroupID, userIDs); err != nil {
log.ZWarn(ctx, "single chat first create conversation error", err,
"conversationID", conversationID)
}
}
case constant.SingleChatType, constant.NotificationChatType:
if err := och.conversationRpcClient.SingleChatFirstCreateConversation(ctx, storageList[0].RecvID,
storageList[0].SendID, conversationID, storageList[0].SessionType); err != nil {
if err := och.conversationRpcClient.SingleChatFirstCreateConversation(ctx, msg.RecvID,
msg.SendID, conversationID, msg.SessionType); err != nil {
log.ZWarn(ctx, "single chat or notification first create conversation error", err,
"conversationID", conversationID, "sessionType", storageList[0].SessionType)
"conversationID", conversationID, "sessionType", msg.SessionType)
}
default:
log.ZWarn(ctx, "unknown session type", nil, "sessionType",
storageList[0].SessionType)
msg.SessionType)
}
}

log.ZDebug(ctx, "success incr to next topic")
err = och.msgDatabase.MsgToMongoMQ(ctx, key, conversationID, storageList, lastSeq)
err = och.msgDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq)
if err != nil {
log.ZError(ctx, "MsgToMongoMQ error", err)
log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID",
conversationID, "storageList", storageMessageList, "lastSeq", lastSeq)
}
och.toPushTopic(ctx, key, conversationID, storageList)
}
}

func (och *OnlineHistoryRedisConsumerHandler) MessagesDistributionHandle() {
for {
aggregationMsgs := make(map[string][]*ContextMsg, ChannelNum)
select {
case cmd := <-och.msgDistributionCh:
switch cmd.Cmd {
case ConsumerMsgs:
triggerChannelValue := cmd.Value.(TriggerChannelValue)
ctx := triggerChannelValue.ctx
consumerMessages := triggerChannelValue.cMsgList
// Aggregation map[userid]message list
log.ZDebug(ctx, "batch messages come to distribution center", "length", len(consumerMessages))
for i := 0; i < len(consumerMessages); i++ {
ctxMsg := &ContextMsg{}
msgFromMQ := &sdkws.MsgData{}
err := proto.Unmarshal(consumerMessages[i].Value, msgFromMQ)
if err != nil {
log.ZError(ctx, "msg_transfer Unmarshal msg err", err, string(consumerMessages[i].Value))
continue
}
var arr []string
for i, header := range consumerMessages[i].Headers {
arr = append(arr, strconv.Itoa(i), string(header.Key), string(header.Value))
}
log.ZInfo(ctx, "consumer.kafka.GetContextWithMQHeader", "len", len(consumerMessages[i].Headers),
"header", strings.Join(arr, ", "))
ctxMsg.ctx = kafka.GetContextWithMQHeader(consumerMessages[i].Headers)
ctxMsg.message = msgFromMQ
log.ZDebug(
ctx,
"single msg come to distribution center",
"message",
msgFromMQ,
"key",
string(consumerMessages[i].Key),
)
// aggregationMsgs[string(consumerMessages[i].Key)] =
// append(aggregationMsgs[string(consumerMessages[i].Key)], ctxMsg)
if oldM, ok := aggregationMsgs[string(consumerMessages[i].Key)]; ok {
oldM = append(oldM, ctxMsg)
aggregationMsgs[string(consumerMessages[i].Key)] = oldM
} else {
m := make([]*ContextMsg, 0, 100)
m = append(m, ctxMsg)
aggregationMsgs[string(consumerMessages[i].Key)] = m
}
}
log.ZDebug(ctx, "generate map list users len", "length", len(aggregationMsgs))
for uniqueKey, v := range aggregationMsgs {
if len(v) >= 0 {
hashCode := stringutil.GetHashCode(uniqueKey)
channelID := hashCode % ChannelNum
newCtx := withAggregationCtx(ctx, v)
log.ZDebug(
newCtx,
"generate channelID",
"hashCode",
hashCode,
"channelID",
channelID,
"uniqueKey",
uniqueKey,
)
och.chArrays[channelID] <- Cmd2Value{Cmd: SourceMessages, Value: MsgChannelValue{uniqueKey: uniqueKey, ctxMsgList: v, ctx: newCtx}}
}
}
}
func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Context, key, conversationID string,
storageList, notStorageList []*ContextMsg) {
och.toPushTopic(ctx, key, conversationID, notStorageList)
var storageMessageList []*sdkws.MsgData
for _, msg := range storageList {
storageMessageList = append(storageMessageList, msg.message)
}
if len(storageMessageList) > 0 {
lastSeq, _, err := och.msgDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList)
if err != nil {
log.ZError(ctx, "notification batch insert to redis error", err, "conversationID", conversationID,
"storageList", storageMessageList)
return
}
log.ZDebug(ctx, "success to next topic", "conversationID", conversationID)
err = och.msgDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq)
if err != nil {
log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID",
conversationID, "storageList", storageMessageList, "lastSeq", lastSeq)
}
och.toPushTopic(ctx, key, conversationID, storageList)
}
}

func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(_ context.Context, key, conversationID string, msgs []*ContextMsg) {
for _, v := range msgs {
och.msgDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message)
}
}

@@ -382,106 +278,30 @@ func (och *OnlineHistoryRedisConsumerHandler) Cleanup(_ sarama.ConsumerGroupSess
return nil
}

func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(
sess sarama.ConsumerGroupSession,
claim sarama.ConsumerGroupClaim,
) error { // a instance in the consumer group
for {
if sess == nil {
log.ZWarn(context.Background(), "sess == nil, waiting", nil)
time.Sleep(100 * time.Millisecond)
} else {
break
}
}
func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession,
claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group
log.ZInfo(context.Background(), "online new session msg come", "highWaterMarkOffset",
claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())

var (
split = 1000
rwLock = new(sync.RWMutex)
messages = make([]*sarama.ConsumerMessage, 0, 1000)
ticker = time.NewTicker(time.Millisecond * 100)

wg = sync.WaitGroup{}
running = new(atomic.Bool)
)
running.Store(true)

wg.Add(1)
go func() {
defer wg.Done()

for {
select {
case <-ticker.C:
// if the buffer is empty and running is false, return loop.
if len(messages) == 0 {
if !running.Load() {
return
}

continue
}

rwLock.Lock()
buffer := make([]*sarama.ConsumerMessage, 0, len(messages))
buffer = append(buffer, messages...)

// reuse slice, set cap to 0
messages = messages[:0]
rwLock.Unlock()

start := time.Now()
ctx := mcontext.WithTriggerIDContext(context.Background(), idutil.OperationIDGenerator())
log.ZDebug(ctx, "timer trigger msg consumer start", "length", len(buffer))
for i := 0; i < len(buffer)/split; i++ {
och.msgDistributionCh <- Cmd2Value{Cmd: ConsumerMsgs, Value: TriggerChannelValue{
ctx: ctx, cMsgList: buffer[i*split : (i+1)*split],
}}
}
if (len(buffer) % split) > 0 {
och.msgDistributionCh <- Cmd2Value{Cmd: ConsumerMsgs, Value: TriggerChannelValue{
ctx: ctx, cMsgList: buffer[split*(len(buffer)/split):],
}}
}

log.ZDebug(ctx, "timer trigger msg consumer end",
"length", len(buffer), "time_cost", time.Since(start),
)
och.redisMessageBatches.OnComplete = func(lastMessage *sarama.ConsumerMessage, totalCount int) {
session.MarkMessage(lastMessage, "")
session.Commit()
}
for {
select {
case msg, ok := <-claim.Messages():
if !ok {
return nil
}
}
}()

wg.Add(1)
go func() {
defer wg.Done()

for running.Load() {
select {
case msg, ok := <-claim.Messages():
if !ok {
running.Store(false)
return
}

if len(msg.Value) == 0 {
continue
}

rwLock.Lock()
messages = append(messages, msg)
rwLock.Unlock()

sess.MarkMessage(msg, "")

case <-sess.Context().Done():
running.Store(false)
return
if len(msg.Value) == 0 {
continue
}
err := och.redisMessageBatches.Put(context.Background(), msg)
if err != nil {
log.ZWarn(context.Background(), "put msg to error", err, "msg", msg)
}
case <-session.Context().Done():
return nil
}
}()

wg.Wait()
return nil
}
}
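The batcher above routes each Kafka message by its key: hash(key) modulo the worker count, so every message for a given conversation lands on the same worker and per-conversation ordering is preserved while different conversations are processed in parallel. A small sketch of that routing rule; FNV-1a is used only for illustration, the repo's stringutil.GetHashCode may hash differently.

package sharding

import "hash/fnv"

// shardByKey maps a message key to a worker index so that equal keys always
// go to the same worker.
func shardByKey(key string, workers int) int {
	h := fnv.New32a()
	_, _ = h.Write([]byte(key))
	return int(h.Sum32()) % workers
}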
@@ -19,8 +19,8 @@ import (

"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
pbmsg "github.com/openimsdk/protocol/msg"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
@@ -89,7 +89,6 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont
msgFromMQ.ConversationID,
)
}
mc.msgDatabase.DelUserDeleteMsgsList(ctx, msgFromMQ.ConversationID, seqs)
}

func (OnlineHistoryMongoConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil }
@@ -22,7 +22,7 @@ import (
firebase "firebase.google.com/go"
"firebase.google.com/go/messaging"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/tools/errs"
"github.com/redis/go-redis/v9"
@@ -24,7 +24,7 @@ import (
"time"

"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
@@ -22,7 +22,7 @@ import (
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/jpush"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
)

const (
@@ -4,8 +4,8 @@ import (
"context"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
pbpush "github.com/openimsdk/protocol/push"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/discovery"
@@ -49,7 +49,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
if err != nil {
return err
}
cacheModel := cache.NewThirdCache(rdb)
cacheModel := redis.NewThirdCache(rdb)
offlinePusher, err := offlinepush.NewOfflinePusher(&config.RpcConfig, cacheModel)
if err != nil {
return err
@@ -17,6 +17,7 @@ package push
import (
"context"
"encoding/json"
"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
@@ -25,20 +26,18 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/utils/jsonutil"
"github.com/redis/go-redis/v9"

"github.com/IBM/sarama"
"github.com/openimsdk/protocol/constant"
pbchat "github.com/openimsdk/protocol/msg"
pbpush "github.com/openimsdk/protocol/push"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/jsonutil"
"github.com/openimsdk/tools/utils/timeutil"
"github.com/redis/go-redis/v9"
"google.golang.org/protobuf/proto"
)

@@ -162,7 +161,8 @@ func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *

err = c.offlinePushMsg(ctx, msg, offlinePUshUserID)
if err != nil {
return err
log.ZWarn(ctx, "offlinePushMsg failed", err, "offlinePUshUserID", offlinePUshUserID, "msg", msg)
return nil
}

return nil
@@ -223,8 +223,8 @@ func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *s

err = c.offlinePushMsg(ctx, msg, needOfflinePushUserIDs)
if err != nil {
log.ZError(ctx, "offlinePushMsg failed", err, "groupID", groupID, "msg", msg)
return err
log.ZWarn(ctx, "offlinePushMsg failed", err, "groupID", groupID, "msg", msg)
return nil
}

}
@ -17,14 +17,14 @@ package auth
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/tools/db/redisutil"
"github.com/redis/go-redis/v9"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
pbauth "github.com/openimsdk/protocol/auth"
"github.com/openimsdk/protocol/constant"
@ -61,7 +61,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
userRpcClient: &userRpcClient,
RegisterCenter: client,
authDatabase: controller.NewAuthDatabase(
cache.NewTokenCacheModel(rdb),
redis2.NewTokenCacheModel(rdb),
config.Share.Secret,
config.RpcConfig.TokenPolicy.Expire,
),

@ -17,15 +17,16 @@ package conversation
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/tools/db/redisutil"
"sort"

"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/mgo"
tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/protocol/constant"
pbconversation "github.com/openimsdk/protocol/conversation"
@ -73,13 +74,14 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg)
userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
cache.InitLocalCache(&config.LocalCacheConfig)
localcache.InitLocalCache(&config.LocalCacheConfig)
pbconversation.RegisterConversationServer(server, &conversationServer{
msgRpcClient: &msgRpcClient,
user: &userRpcClient,
conversationNotificationSender: NewConversationNotificationSender(&config.NotificationConfig, &msgRpcClient),
groupRpcClient: &groupRpcClient,
conversationDatabase: controller.NewConversationDatabase(conversationDB, cache.NewConversationRedis(rdb, &config.LocalCacheConfig, cache.GetDefaultOpt(), conversationDB), mgocli.GetTx()),
conversationDatabase: controller.NewConversationDatabase(conversationDB,
redis.NewConversationRedis(rdb, &config.LocalCacheConfig, redis.GetRocksCacheOptions(), conversationDB), mgocli.GetTx()),
})
return nil
}
@ -192,11 +194,11 @@ func (c *conversationServer) GetConversations(ctx context.Context, req *pbconver
}

func (c *conversationServer) SetConversation(ctx context.Context, req *pbconversation.SetConversationReq) (*pbconversation.SetConversationResp, error) {
var conversation tablerelation.ConversationModel
var conversation tablerelation.Conversation
if err := datautil.CopyStructFields(&conversation, req.Conversation); err != nil {
return nil, err
}
err := c.conversationDatabase.SetUserConversations(ctx, req.Conversation.OwnerUserID, []*tablerelation.ConversationModel{&conversation})
err := c.conversationDatabase.SetUserConversations(ctx, req.Conversation.OwnerUserID, []*tablerelation.Conversation{&conversation})
if err != nil {
return nil, err
}
@ -220,7 +222,7 @@ func (c *conversationServer) SetConversations(ctx context.Context, req *pbconver
}
}
var unequal int
var conv tablerelation.ConversationModel
var conv tablerelation.Conversation
if len(req.UserIDs) == 1 {
cs, err := c.conversationDatabase.FindConversations(ctx, req.UserIDs[0], []string{req.Conversation.ConversationID})
if err != nil {
@ -231,7 +233,7 @@ func (c *conversationServer) SetConversations(ctx context.Context, req *pbconver
}
conv = *cs[0]
}
var conversation tablerelation.ConversationModel
var conversation tablerelation.Conversation
conversation.ConversationID = req.Conversation.ConversationID
conversation.ConversationType = req.Conversation.ConversationType
conversation.UserID = req.Conversation.UserID
@ -280,7 +282,7 @@ func (c *conversationServer) SetConversations(ctx context.Context, req *pbconver
}
}
if req.Conversation.IsPrivateChat != nil && req.Conversation.ConversationType != constant.ReadGroupChatType {
var conversations []*tablerelation.ConversationModel
var conversations []*tablerelation.Conversation
for _, ownerUserID := range req.UserIDs {
conversation2 := conversation
conversation2.OwnerUserID = ownerUserID
@ -328,12 +330,12 @@ func (c *conversationServer) CreateSingleChatConversations(ctx context.Context,
) (*pbconversation.CreateSingleChatConversationsResp, error) {
switch req.ConversationType {
case constant.SingleChatType:
var conversation tablerelation.ConversationModel
var conversation tablerelation.Conversation
conversation.ConversationID = req.ConversationID
conversation.ConversationType = req.ConversationType
conversation.OwnerUserID = req.SendID
conversation.UserID = req.RecvID
err := c.conversationDatabase.CreateConversation(ctx, []*tablerelation.ConversationModel{&conversation})
err := c.conversationDatabase.CreateConversation(ctx, []*tablerelation.Conversation{&conversation})
if err != nil {
log.ZWarn(ctx, "create conversation failed", err, "conversation", conversation)
}
@ -341,17 +343,17 @@ func (c *conversationServer) CreateSingleChatConversations(ctx context.Context,
conversation2 := conversation
conversation2.OwnerUserID = req.RecvID
conversation2.UserID = req.SendID
err = c.conversationDatabase.CreateConversation(ctx, []*tablerelation.ConversationModel{&conversation2})
err = c.conversationDatabase.CreateConversation(ctx, []*tablerelation.Conversation{&conversation2})
if err != nil {
log.ZWarn(ctx, "create conversation failed", err, "conversation2", conversation)
}
case constant.NotificationChatType:
var conversation tablerelation.ConversationModel
var conversation tablerelation.Conversation
conversation.ConversationID = req.ConversationID
conversation.ConversationType = req.ConversationType
conversation.OwnerUserID = req.RecvID
conversation.UserID = req.SendID
err := c.conversationDatabase.CreateConversation(ctx, []*tablerelation.ConversationModel{&conversation})
err := c.conversationDatabase.CreateConversation(ctx, []*tablerelation.Conversation{&conversation})
if err != nil {
log.ZWarn(ctx, "create conversation failed", err, "conversation2", conversation)
}

@ -16,11 +16,11 @@ package friend

import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
pbfriend "github.com/openimsdk/protocol/friend"
"github.com/openimsdk/tools/mcontext"
)
@ -58,7 +58,7 @@ func (s *friendServer) RemoveBlack(ctx context.Context, req *pbfriend.RemoveBlac
return nil, err
}

if err := s.blackDatabase.Delete(ctx, []*relation.BlackModel{{OwnerUserID: req.OwnerUserID, BlockUserID: req.BlackUserID}}); err != nil {
if err := s.blackDatabase.Delete(ctx, []*model.Black{{OwnerUserID: req.OwnerUserID, BlockUserID: req.BlackUserID}}); err != nil {
return nil, err
}

@ -75,7 +75,7 @@ func (s *friendServer) AddBlack(ctx context.Context, req *pbfriend.AddBlackReq)
if err != nil {
return nil, err
}
black := relation.BlackModel{
black := model.Black{
OwnerUserID: req.OwnerUserID,
BlockUserID: req.BlackUserID,
OperatorUserID: mcontext.GetOpUserID(ctx),
@ -83,7 +83,7 @@ func (s *friendServer) AddBlack(ctx context.Context, req *pbfriend.AddBlackReq)
Ex: req.Ex,
}

if err := s.blackDatabase.Create(ctx, []*relation.BlackModel{&black}); err != nil {
if err := s.blackDatabase.Create(ctx, []*model.Black{&black}); err != nil {
return nil, err
}
s.notificationSender.BlackAddedNotification(ctx, req)

@ -17,16 +17,17 @@ package friend
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/tools/db/redisutil"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/mgo"
tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/protocol/constant"
pbfriend "github.com/openimsdk/protocol/friend"
@ -96,19 +97,19 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
&msgRpcClient,
WithRpcFunc(userRpcClient.GetUsersInfo),
)
cache.InitLocalCache(&config.LocalCacheConfig)
localcache.InitLocalCache(&config.LocalCacheConfig)

// Register Friend server with refactored MongoDB and Redis integrations
pbfriend.RegisterFriendServer(server, &friendServer{
friendDatabase: controller.NewFriendDatabase(
friendMongoDB,
friendRequestMongoDB,
cache.NewFriendCacheRedis(rdb, &config.LocalCacheConfig, friendMongoDB, cache.GetDefaultOpt()),
redis.NewFriendCacheRedis(rdb, &config.LocalCacheConfig, friendMongoDB, redis.GetRocksCacheOptions()),
mgocli.GetTx(),
),
blackDatabase: controller.NewBlackDatabase(
blackMongoDB,
cache.NewBlackCacheRedis(rdb, &config.LocalCacheConfig, blackMongoDB, cache.GetDefaultOpt()),
redis.NewBlackCacheRedis(rdb, &config.LocalCacheConfig, blackMongoDB, redis.GetRocksCacheOptions()),
),
userRpcClient: &userRpcClient,
notificationSender: notificationSender,
@ -193,7 +194,7 @@ func (s *friendServer) RespondFriendApply(ctx context.Context, req *pbfriend.Res
return nil, err
}

friendRequest := tablerelation.FriendRequestModel{
friendRequest := model.FriendRequest{
FromUserID: req.FromUserID,
ToUserID: req.ToUserID,
HandleMsg: req.HandleMsg,
@ -384,10 +385,10 @@ func (s *friendServer) GetSpecifiedFriendsInfo(ctx context.Context, req *pbfrien
if err != nil {
return nil, err
}
friendMap := datautil.SliceToMap(friends, func(e *tablerelation.FriendModel) string {
friendMap := datautil.SliceToMap(friends, func(e *model.Friend) string {
return e.FriendUserID
})
blackMap := datautil.SliceToMap(blacks, func(e *tablerelation.BlackModel) string {
blackMap := datautil.SliceToMap(blacks, func(e *model.Black) string {
return e.BlockUserID
})
resp := &pbfriend.GetSpecifiedFriendsInfoResp{

@ -16,11 +16,11 @@ package friend

import (
"context"
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"

"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification"
"github.com/openimsdk/protocol/constant"
@ -46,7 +46,7 @@ func WithFriendDB(db controller.FriendDatabase) friendNotificationSenderOptions
}

func WithDBFunc(
fn func(ctx context.Context, userIDs []string) (users []*relationtb.UserModel, err error),
fn func(ctx context.Context, userIDs []string) (users []*relationtb.User, err error),
) friendNotificationSenderOptions {
return func(s *FriendNotificationSender) {
f := func(ctx context.Context, userIDs []string) (result []notification.CommonUser, err error) {

@ -19,7 +19,7 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/apistruct"
"github.com/openimsdk/open-im-server/v3/pkg/callbackstruct"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/group"
@ -100,7 +100,7 @@ func (s *groupServer) webhookAfterCreateGroup(ctx context.Context, after *config
s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, &callbackstruct.CallbackAfterCreateGroupResp{}, after)
}

func (s *groupServer) webhookBeforeMemberJoinGroup(ctx context.Context, before *config.BeforeConfig, groupMember *relation.GroupMemberModel, groupEx string) error {
func (s *groupServer) webhookBeforeMemberJoinGroup(ctx context.Context, before *config.BeforeConfig, groupMember *model.GroupMember, groupEx string) error {
return webhook.WithCondition(ctx, before, func(ctx context.Context) error {
cbReq := &callbackstruct.CallbackBeforeMemberJoinGroupReq{
CallbackCommand: callbackstruct.CallbackBeforeMemberJoinGroupCommand,

@ -15,11 +15,11 @@
package group

import (
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/protocol/sdkws"
)

func (s *groupServer) groupDB2PB(group *relation.GroupModel, ownerUserID string, memberCount uint32) *sdkws.GroupInfo {
func (s *groupServer) groupDB2PB(group *model.Group, ownerUserID string, memberCount uint32) *sdkws.GroupInfo {
return &sdkws.GroupInfo{
GroupID: group.GroupID,
GroupName: group.GroupName,
@ -41,7 +41,7 @@ func (s *groupServer) groupDB2PB(group *relation.GroupModel, ownerUserID string,
}
}

func (s *groupServer) groupMemberDB2PB(member *relation.GroupMemberModel, appMangerLevel int32) *sdkws.GroupMemberFullInfo {
func (s *groupServer) groupMemberDB2PB(member *model.GroupMember, appMangerLevel int32) *sdkws.GroupMemberFullInfo {
return &sdkws.GroupMemberFullInfo{
GroupID: member.GroupID,
UserID: member.UserID,

@ -16,10 +16,9 @@ package group

import (
"context"

relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
)

func (s *groupServer) PopulateGroupMember(ctx context.Context, members ...*relationtb.GroupMemberModel) error {
func (s *groupServer) PopulateGroupMember(ctx context.Context, members ...*relationtb.GroupMember) error {
return s.notification.PopulateGroupMember(ctx, members...)
}

@ -18,8 +18,11 @@ import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/common"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"math/big"
"math/rand"
"strconv"
@ -29,10 +32,8 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/callbackstruct"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/mgo"
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient/grouphash"
@ -110,7 +111,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
}
return datautil.Slice(users, func(e *sdkws.UserInfo) notification.CommonUser { return e }), nil
})
cache.InitLocalCache(&config.LocalCacheConfig)
localcache.InitLocalCache(&config.LocalCacheConfig)
gs.conversationRpcClient = conversationRpcClient
gs.msgRpcClient = msgRpcClient
gs.config = config
@ -234,14 +235,14 @@ func (s *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupR
return nil, err
}

var groupMembers []*relationtb.GroupMemberModel
var groupMembers []*model.GroupMember
group := convert.Pb2DBGroupInfo(req.GroupInfo)
if err := s.GenGroupID(ctx, &group.GroupID); err != nil {
return nil, err
}

joinGroup := func(userID string, roleLevel int32) error {
groupMember := &relationtb.GroupMemberModel{
groupMember := &model.GroupMember{
GroupID: group.GroupID,
UserID: userID,
RoleLevel: roleLevel,
@ -271,7 +272,7 @@ func (s *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupR
return nil, err
}
}
if err := s.db.CreateGroup(ctx, []*relationtb.GroupModel{group}, groupMembers); err != nil {
if err := s.db.CreateGroup(ctx, []*model.Group{group}, groupMembers); err != nil {
return nil, err
}
resp := &pbgroup.CreateGroupResp{GroupInfo: &sdkws.GroupInfo{}}
@ -291,28 +292,7 @@ func (s *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupR
break
}
}
if req.GroupInfo.GroupType == constant.SuperGroup {
go func() {
for _, userID := range userIDs {
s.notification.SuperGroupNotification(ctx, userID, userID)
}
}()
} else {
tips := &sdkws.GroupCreatedTips{
Group: resp.GroupInfo,
OperationTime: group.CreateTime.UnixMilli(),
GroupOwnerUser: s.groupMemberDB2PB(groupMembers[0], userMap[groupMembers[0].UserID].AppMangerLevel),
}
for _, member := range groupMembers {
member.Nickname = userMap[member.UserID].Nickname
tips.MemberList = append(tips.MemberList, s.groupMemberDB2PB(member, userMap[member.UserID].AppMangerLevel))
if member.UserID == opUserID {
tips.OpUser = s.groupMemberDB2PB(member, userMap[member.UserID].AppMangerLevel)
break
}
}
s.notification.GroupCreatedNotification(ctx, tips)
}
s.notification.GroupCreatedNotification(ctx, tips)

reqCallBackAfter := &pbgroup.CreateGroupReq{
MemberUserIDs: userIDs,
@ -339,7 +319,7 @@ func (s *groupServer) GetJoinedGroupList(ctx context.Context, req *pbgroup.GetJo
if len(members) == 0 {
return &resp, nil
}
groupIDs := datautil.Slice(members, func(e *relationtb.GroupMemberModel) string {
groupIDs := datautil.Slice(members, func(e *model.GroupMember) string {
return e.GroupID
})
groups, err := s.db.FindGroup(ctx, groupIDs)
@ -357,12 +337,12 @@ func (s *groupServer) GetJoinedGroupList(ctx context.Context, req *pbgroup.GetJo
if err := s.PopulateGroupMember(ctx, members...); err != nil {
return nil, err
}
ownerMap := datautil.SliceToMap(owners, func(e *relationtb.GroupMemberModel) string {
ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string {
return e.GroupID
})
resp.Groups = datautil.Slice(datautil.Order(groupIDs, groups, func(group *relationtb.GroupModel) string {
resp.Groups = datautil.Slice(datautil.Order(groupIDs, groups, func(group *model.Group) string {
return group.GroupID
}), func(group *relationtb.GroupModel) *sdkws.GroupInfo {
}), func(group *model.Group) *sdkws.GroupInfo {
var userID string
if user := ownerMap[group.GroupID]; user != nil {
userID = user.UserID
@ -397,7 +377,7 @@ func (s *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.Invite
return nil, errs.ErrRecordNotFound.WrapMsg("user not found")
}

var groupMember *relationtb.GroupMemberModel
var groupMember *model.GroupMember
var opUserID string
if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) {
opUserID = mcontext.GetOpUserID(ctx)
@ -418,9 +398,9 @@ func (s *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.Invite
if group.NeedVerification == constant.AllNeedVerification {
if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) {
if !(groupMember.RoleLevel == constant.GroupOwner || groupMember.RoleLevel == constant.GroupAdmin) {
var requests []*relationtb.GroupRequestModel
var requests []*model.GroupRequest
for _, userID := range req.InvitedUserIDs {
requests = append(requests, &relationtb.GroupRequestModel{
requests = append(requests, &model.GroupRequest{
UserID: userID,
GroupID: req.GroupID,
JoinSource: constant.JoinByInvitation,
@ -444,9 +424,9 @@ func (s *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.Invite
}
}
}
var groupMembers []*relationtb.GroupMemberModel
var groupMembers []*model.GroupMember
for _, userID := range req.InvitedUserIDs {
member := &relationtb.GroupMemberModel{
member := &model.GroupMember{
GroupID: req.GroupID,
UserID: userID,
RoleLevel: constant.GroupOrdinaryUsers,
@ -482,7 +462,7 @@ func (s *groupServer) GetGroupAllMember(ctx context.Context, req *pbgroup.GetGro
return nil, err
}
var resp pbgroup.GetGroupAllMemberResp
resp.Members = datautil.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
resp.Members = datautil.Slice(members, func(e *model.GroupMember) *sdkws.GroupMemberFullInfo {
return convert.Db2PbGroupMember(e)
})
return &resp, nil
@ -491,7 +471,7 @@ func (s *groupServer) GetGroupAllMember(ctx context.Context, req *pbgroup.GetGro
func (s *groupServer) GetGroupMemberList(ctx context.Context, req *pbgroup.GetGroupMemberListReq) (*pbgroup.GetGroupMemberListResp, error) {
var (
total int64
members []*relationtb.GroupMemberModel
members []*model.GroupMember
err error
)
if req.Keyword == "" {
@ -506,7 +486,7 @@ func (s *groupServer) GetGroupMemberList(ctx context.Context, req *pbgroup.GetGr
return nil, err
}
if req.Keyword != "" {
groupMembers := make([]*relationtb.GroupMemberModel, 0)
groupMembers := make([]*model.GroupMember, 0)
for _, member := range members {
if member.UserID == req.Keyword {
groupMembers = append(groupMembers, member)
@ -554,7 +534,7 @@ func (s *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGrou
if err := s.PopulateGroupMember(ctx, members...); err != nil {
return nil, err
}
memberMap := make(map[string]*relationtb.GroupMemberModel)
memberMap := make(map[string]*model.GroupMember)
for i, member := range members {
memberMap[member.UserID] = members[i]
}
@ -649,7 +629,7 @@ func (s *groupServer) GetGroupMembersInfo(ctx context.Context, req *pbgroup.GetG
return nil, err
}
return &pbgroup.GetGroupMembersInfoResp{
Members: datautil.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
Members: datautil.Slice(members, func(e *model.GroupMember) *sdkws.GroupMemberFullInfo {
return convert.Db2PbGroupMember(e)
}),
}, nil
@ -687,7 +667,7 @@ func (s *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup.
if err != nil {
return nil, err
}
groupMap := datautil.SliceToMap(groups, func(e *relationtb.GroupModel) string {
groupMap := datautil.SliceToMap(groups, func(e *model.Group) string {
return e.GroupID
})
if ids := datautil.Single(datautil.Keys(groupMap), groupIDs); len(ids) > 0 {
@ -704,10 +684,10 @@ func (s *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup.
if err := s.PopulateGroupMember(ctx, owners...); err != nil {
return nil, err
}
ownerMap := datautil.SliceToMap(owners, func(e *relationtb.GroupMemberModel) string {
ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string {
return e.GroupID
})
resp.GroupRequests = datautil.Slice(groupRequests, func(e *relationtb.GroupRequestModel) *sdkws.GroupRequest {
resp.GroupRequests = datautil.Slice(groupRequests, func(e *model.GroupRequest) *sdkws.GroupRequest {
var ownerUserID string
if owner, ok := ownerMap[e.GroupID]; ok {
ownerUserID = owner.UserID
@ -736,11 +716,11 @@ func (s *groupServer) GetGroupsInfo(ctx context.Context, req *pbgroup.GetGroupsI
if err := s.PopulateGroupMember(ctx, owners...); err != nil {
return nil, err
}
ownerMap := datautil.SliceToMap(owners, func(e *relationtb.GroupMemberModel) string {
ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string {
return e.GroupID
})
return &pbgroup.GetGroupsInfoResp{
GroupInfos: datautil.Slice(groups, func(e *relationtb.GroupModel) *sdkws.GroupInfo {
GroupInfos: datautil.Slice(groups, func(e *model.Group) *sdkws.GroupInfo {
var ownerUserID string
if owner, ok := ownerMap[e.GroupID]; ok {
ownerUserID = owner.UserID
@ -783,9 +763,9 @@ func (s *groupServer) GroupApplicationResponse(ctx context.Context, req *pbgroup
if _, err := s.user.GetPublicUserInfo(ctx, req.FromUserID); err != nil {
return nil, err
}
var member *relationtb.GroupMemberModel
var member *model.GroupMember
if (!inGroup) && req.HandleResult == constant.GroupResponseAgree {
member = &relationtb.GroupMemberModel{
member = &model.GroupMember{
GroupID: req.GroupID,
UserID: req.FromUserID,
Nickname: "",
@ -857,7 +837,7 @@ func (s *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq)
}
log.ZDebug(ctx, "JoinGroup.groupInfo", "group", group, "eq", group.NeedVerification == constant.Directly)
if group.NeedVerification == constant.Directly {
groupMember := &relationtb.GroupMemberModel{
groupMember := &model.GroupMember{
GroupID: group.GroupID,
UserID: user.UserID,
RoleLevel: constant.GroupOrdinaryUsers,
@ -871,7 +851,7 @@ func (s *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq)
return nil, err
}

if err := s.db.CreateGroup(ctx, nil, []*relationtb.GroupMemberModel{groupMember}); err != nil {
if err := s.db.CreateGroup(ctx, nil, []*model.GroupMember{groupMember}); err != nil {
return nil, err
}

@ -883,7 +863,7 @@ func (s *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq)

return &pbgroup.JoinGroupResp{}, nil
}
groupRequest := relationtb.GroupRequestModel{
groupRequest := model.GroupRequest{
UserID: req.InviterUserID,
ReqMsg: req.ReqMessage,
GroupID: req.GroupID,
@ -892,7 +872,7 @@ func (s *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq)
HandledTime: time.Unix(0, 0),
Ex: req.Ex,
}
if err = s.db.CreateGroupRequest(ctx, []*relationtb.GroupRequestModel{&groupRequest}); err != nil {
if err = s.db.CreateGroupRequest(ctx, []*model.GroupRequest{&groupRequest}); err != nil {
return nil, err
}
s.notification.JoinGroupApplicationNotification(ctx, req)
@ -940,7 +920,7 @@ func (s *groupServer) deleteMemberAndSetConversationSeq(ctx context.Context, gro
}

func (s *groupServer) SetGroupInfo(ctx context.Context, req *pbgroup.SetGroupInfoReq) (*pbgroup.SetGroupInfoResp, error) {
var opMember *relationtb.GroupMemberModel
var opMember *model.GroupMember
if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) {
var err error
opMember, err = s.db.TakeGroupMember(ctx, req.GroupInfoForSet.GroupID, mcontext.GetOpUserID(ctx))
@ -1049,7 +1029,7 @@ func (s *groupServer) TransferGroupOwner(ctx context.Context, req *pbgroup.Trans
if err := s.PopulateGroupMember(ctx, members...); err != nil {
return nil, err
}
memberMap := datautil.SliceToMap(members, func(e *relationtb.GroupMemberModel) string { return e.UserID })
memberMap := datautil.SliceToMap(members, func(e *model.GroupMember) string { return e.UserID })
if ids := datautil.Single([]string{req.OldOwnerUserID, req.NewOwnerUserID}, datautil.Keys(memberMap)); len(ids) > 0 {
return nil, errs.ErrArgs.WrapMsg("user not in group " + strings.Join(ids, ","))
}
@ -1078,7 +1058,7 @@ func (s *groupServer) TransferGroupOwner(ctx context.Context, req *pbgroup.Trans

func (s *groupServer) GetGroups(ctx context.Context, req *pbgroup.GetGroupsReq) (*pbgroup.GetGroupsResp, error) {
var (
group []*relationtb.GroupModel
group []*model.Group
err error
)
var resp pbgroup.GetGroupsResp
@ -1095,7 +1075,7 @@ func (s *groupServer) GetGroups(ctx context.Context, req *pbgroup.GetGroupsReq)
return nil, err
}

groupIDs := datautil.Slice(group, func(e *relationtb.GroupModel) string {
groupIDs := datautil.Slice(group, func(e *model.Group) string {
return e.GroupID
})

@ -1104,14 +1084,14 @@ func (s *groupServer) GetGroups(ctx context.Context, req *pbgroup.GetGroupsReq)
return nil, err
}

ownerMemberMap := datautil.SliceToMap(ownerMembers, func(e *relationtb.GroupMemberModel) string {
ownerMemberMap := datautil.SliceToMap(ownerMembers, func(e *model.GroupMember) string {
return e.GroupID
})
groupMemberNumMap, err := s.db.MapGroupMemberNum(ctx, groupIDs)
if err != nil {
return nil, err
}
resp.Groups = datautil.Slice(group, func(group *relationtb.GroupModel) *pbgroup.CMSGroup {
resp.Groups = datautil.Slice(group, func(group *model.Group) *pbgroup.CMSGroup {
var (
userID string
username string
@ -1135,7 +1115,7 @@ func (s *groupServer) GetGroupMembersCMS(ctx context.Context, req *pbgroup.GetGr
if err := s.PopulateGroupMember(ctx, members...); err != nil {
return nil, err
}
resp.Members = datautil.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
resp.Members = datautil.Slice(members, func(e *model.GroupMember) *sdkws.GroupMemberFullInfo {
return convert.Db2PbGroupMember(e)
})
return &resp, nil
@ -1153,14 +1133,14 @@ func (s *groupServer) GetUserReqApplicationList(ctx context.Context, req *pbgrou
if len(requests) == 0 {
return &pbgroup.GetUserReqApplicationListResp{Total: uint32(total)}, nil
}
groupIDs := datautil.Distinct(datautil.Slice(requests, func(e *relationtb.GroupRequestModel) string {
groupIDs := datautil.Distinct(datautil.Slice(requests, func(e *model.GroupRequest) string {
return e.GroupID
}))
groups, err := s.db.FindGroup(ctx, groupIDs)
if err != nil {
return nil, err
}
groupMap := datautil.SliceToMap(groups, func(e *relationtb.GroupModel) string {
groupMap := datautil.SliceToMap(groups, func(e *model.Group) string {
return e.GroupID
})
owners, err := s.db.FindGroupsOwner(ctx, groupIDs)
@ -1170,7 +1150,7 @@ func (s *groupServer) GetUserReqApplicationList(ctx context.Context, req *pbgrou
if err := s.PopulateGroupMember(ctx, owners...); err != nil {
return nil, err
}
ownerMap := datautil.SliceToMap(owners, func(e *relationtb.GroupMemberModel) string {
ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string {
return e.GroupID
})
groupMemberNum, err := s.db.MapGroupMemberNum(ctx, groupIDs)
@ -1179,7 +1159,7 @@ func (s *groupServer) GetUserReqApplicationList(ctx context.Context, req *pbgrou
}
return &pbgroup.GetUserReqApplicationListResp{
Total: uint32(total),
GroupRequests: datautil.Slice(requests, func(e *relationtb.GroupRequestModel) *sdkws.GroupRequest {
GroupRequests: datautil.Slice(requests, func(e *model.GroupRequest) *sdkws.GroupRequest {
var ownerUserID string
if owner, ok := ownerMap[e.GroupID]; ok {
ownerUserID = owner.UserID
@ -1430,8 +1410,8 @@ func (s *groupServer) SetGroupMemberInfo(ctx context.Context, req *pbgroup.SetGr
}

}
if err := s.db.UpdateGroupMembers(ctx, datautil.Slice(req.Members, func(e *pbgroup.SetGroupMemberInfo) *relationtb.BatchUpdateGroupMember {
return &relationtb.BatchUpdateGroupMember{
if err := s.db.UpdateGroupMembers(ctx, datautil.Slice(req.Members, func(e *pbgroup.SetGroupMemberInfo) *common.BatchUpdateGroupMember {
return &common.BatchUpdateGroupMember{
GroupID: e.GroupID,
UserID: e.UserID,
Map: UpdateGroupMemberMap(e),
@ -1470,7 +1450,7 @@ func (s *groupServer) GetGroupAbstractInfo(ctx context.Context, req *pbgroup.Get
if err != nil {
return nil, err
}
if ids := datautil.Single(req.GroupIDs, datautil.Slice(groups, func(group *relationtb.GroupModel) string {
if ids := datautil.Single(req.GroupIDs, datautil.Slice(groups, func(group *model.Group) string {
return group.GroupID
})); len(ids) > 0 {
return nil, servererrs.ErrGroupIDNotFound.WrapMsg("not found group " + strings.Join(ids, ","))
@ -1483,7 +1463,7 @@ func (s *groupServer) GetGroupAbstractInfo(ctx context.Context, req *pbgroup.Get
return nil, servererrs.ErrGroupIDNotFound.WrapMsg(fmt.Sprintf("group %s not found member", strings.Join(ids, ",")))
}
return &pbgroup.GetGroupAbstractInfoResp{
GroupAbstractInfos: datautil.Slice(groups, func(group *relationtb.GroupModel) *pbgroup.GroupAbstractInfo {
GroupAbstractInfos: datautil.Slice(groups, func(group *model.Group) *pbgroup.GroupAbstractInfo {
users := groupUserMap[group.GroupID]
return convert.Db2PbGroupAbstractInfo(group.GroupID, users.MemberNum, users.Hash)
}),
@ -1502,7 +1482,7 @@ func (s *groupServer) GetUserInGroupMembers(ctx context.Context, req *pbgroup.Ge
return nil, err
}
return &pbgroup.GetUserInGroupMembersResp{
Members: datautil.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
Members: datautil.Slice(members, func(e *model.GroupMember) *sdkws.GroupMemberFullInfo {
return convert.Db2PbGroupMember(e)
}),
}, nil
@ -1530,7 +1510,7 @@ func (s *groupServer) GetGroupMemberRoleLevel(ctx context.Context, req *pbgroup.
return nil, err
}
return &pbgroup.GetGroupMemberRoleLevelResp{
Members: datautil.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
Members: datautil.Slice(members, func(e *model.GroupMember) *sdkws.GroupMemberFullInfo {
return convert.Db2PbGroupMember(e)
}),
}, nil
@ -1544,14 +1524,14 @@ func (s *groupServer) GetGroupUsersReqApplicationList(ctx context.Context, req *
if len(requests) == 0 {
return &pbgroup.GetGroupUsersReqApplicationListResp{}, nil
}
groupIDs := datautil.Distinct(datautil.Slice(requests, func(e *relationtb.GroupRequestModel) string {
groupIDs := datautil.Distinct(datautil.Slice(requests, func(e *model.GroupRequest) string {
return e.GroupID
}))
groups, err := s.db.FindGroup(ctx, groupIDs)
if err != nil {
return nil, err
}
groupMap := datautil.SliceToMap(groups, func(e *relationtb.GroupModel) string {
groupMap := datautil.SliceToMap(groups, func(e *model.Group) string {
return e.GroupID
})
if ids := datautil.Single(groupIDs, datautil.Keys(groupMap)); len(ids) > 0 {
@ -1564,7 +1544,7 @@ func (s *groupServer) GetGroupUsersReqApplicationList(ctx context.Context, req *
if err := s.PopulateGroupMember(ctx, owners...); err != nil {
return nil, err
}
ownerMap := datautil.SliceToMap(owners, func(e *relationtb.GroupMemberModel) string {
ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string {
return e.GroupID
})
groupMemberNum, err := s.db.MapGroupMemberNum(ctx, groupIDs)
@ -1573,7 +1553,7 @@ func (s *groupServer) GetGroupUsersReqApplicationList(ctx context.Context, req *
}
return &pbgroup.GetGroupUsersReqApplicationListResp{
Total: int64(len(requests)),
GroupRequests: datautil.Slice(requests, func(e *relationtb.GroupRequestModel) *sdkws.GroupRequest {
GroupRequests: datautil.Slice(requests, func(e *model.GroupRequest) *sdkws.GroupRequest {
var ownerUserID string
if owner, ok := ownerMap[e.GroupID]; ok {
ownerUserID = owner.UserID

@ -17,12 +17,12 @@ package group
import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/protocol/constant"
pbgroup "github.com/openimsdk/protocol/group"
@ -50,7 +50,7 @@ type GroupNotificationSender struct {
config *Config
}

func (g *GroupNotificationSender) PopulateGroupMember(ctx context.Context, members ...*relation.GroupMemberModel) error {
func (g *GroupNotificationSender) PopulateGroupMember(ctx context.Context, members ...*model.GroupMember) error {
if len(members) == 0 {
return nil
}
@ -186,12 +186,12 @@ func (g *GroupNotificationSender) getGroupOwnerAndAdminUserID(ctx context.Contex
if err := g.PopulateGroupMember(ctx, members...); err != nil {
return nil, err
}
fn := func(e *relation.GroupMemberModel) string { return e.UserID }
fn := func(e *model.GroupMember) string { return e.UserID }
return datautil.Slice(members, fn), nil
}

//nolint:unused
func (g *GroupNotificationSender) groupDB2PB(group *relation.GroupModel, ownerUserID string, memberCount uint32) *sdkws.GroupInfo {
func (g *GroupNotificationSender) groupDB2PB(group *model.Group, ownerUserID string, memberCount uint32) *sdkws.GroupInfo {
return &sdkws.GroupInfo{
GroupID: group.GroupID,
GroupName: group.GroupName,
@ -213,7 +213,7 @@ func (g *GroupNotificationSender) groupDB2PB(group *relation.GroupModel, ownerUs
}
}

func (g *GroupNotificationSender) groupMemberDB2PB(member *relation.GroupMemberModel, appMangerLevel int32) *sdkws.GroupMemberFullInfo {
func (g *GroupNotificationSender) groupMemberDB2PB(member *model.GroupMember, appMangerLevel int32) *sdkws.GroupMemberFullInfo {
return &sdkws.GroupMemberFullInfo{
GroupID: member.GroupID,
UserID: member.UserID,
@ -715,7 +715,3 @@ func (g *GroupNotificationSender) GroupMemberSetToOrdinaryUserNotification(ctx c
}
g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupMemberSetToOrdinaryUserNotification, tips)
}

func (g *GroupNotificationSender) SuperGroupNotification(ctx context.Context, sendID, recvID string) {
g.Notification(ctx, sendID, recvID, constant.SuperGroupUpdateNotification, nil)
}

@ -17,10 +17,10 @@ package msg
import (
"context"
"encoding/json"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msg"
@ -93,7 +93,7 @@ func (m *msgServer) RevokeMsg(ctx context.Context, req *msg.RevokeMsgReq) (*msg.
}
}
now := time.Now().UnixMilli()
err = m.MsgDatabase.RevokeMsg(ctx, req.ConversationID, req.Seq, &relation.RevokeModel{
err = m.MsgDatabase.RevokeMsg(ctx, req.ConversationID, req.Seq, &model.RevokeModel{
Role: role,
UserID: req.UserID,
Nickname: user.Nickname,

@ -41,7 +41,7 @@ func (m *msgServer) SendMsg(ctx context.Context, req *pbmsg.SendMsgReq) (*pbmsg.
case constant.NotificationChatType:
return m.sendMsgNotification(ctx, req)
case constant.ReadGroupChatType:
return m.sendMsgSuperGroupChat(ctx, req)
return m.sendMsgGroupChat(ctx, req)
default:
return nil, errs.ErrArgs.WrapMsg("unknown sessionType")
}
@ -49,7 +49,7 @@ func (m *msgServer) SendMsg(ctx context.Context, req *pbmsg.SendMsgReq) (*pbmsg.
return nil, errs.ErrArgs.WrapMsg("msgData is nil")
}

func (m *msgServer) sendMsgSuperGroupChat(ctx context.Context, req *pbmsg.SendMsgReq) (resp *pbmsg.SendMsgResp, err error) {
func (m *msgServer) sendMsgGroupChat(ctx context.Context, req *pbmsg.SendMsgReq) (resp *pbmsg.SendMsgResp, err error) {
if err = m.messageVerification(ctx, req); err != nil {
prommetrics.GroupChatMsgProcessFailedCounter.Inc()
return nil, err
@ -110,6 +110,7 @@ func (m *msgServer) setConversationAtInfo(nctx context.Context, msg *sdkws.MsgDa
if err != nil {
log.ZWarn(ctx, "SetConversations", err, "userID", memberUserIDList, "conversation", conversation)
}
return
}
conversation.GroupAtType = &wrapperspb.Int32Value{Value: constant.AtMe}
err := m.Conversation.SetConversations(ctx, msg.AtUserIDList, conversation)

@ -17,14 +17,14 @@ package msg
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"

"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/protocol/constant"
@ -85,9 +85,8 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
if err != nil {
return err
}
//todo MsgCacheTimeout
msgModel := cache.NewMsgCache(rdb, config.RedisConfig.EnablePipeline)
seqModel := cache.NewSeqCache(rdb)
msgModel := redis.NewMsgCache(rdb)
seqModel := redis.NewSeqCache(rdb)
conversationClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)
userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)

@ -16,9 +16,9 @@ package msg

import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/utils/datautil"
@ -31,7 +31,7 @@ func (m *msgServer) GetActiveUser(ctx context.Context, req *msg.GetActiveUserReq
}
var pbUsers []*msg.ActiveUser
if len(users) > 0 {
userIDs := datautil.Slice(users, func(e *relation.UserCount) string { return e.UserID })
userIDs := datautil.Slice(users, func(e *model.UserCount) string { return e.UserID })
userMap, err := m.UserLocalCache.GetUsersInfoMap(ctx, userIDs)
if err != nil {
return nil, err
@ -66,7 +66,7 @@ func (m *msgServer) GetActiveGroup(ctx context.Context, req *msg.GetActiveGroupR
}
var pbgroups []*msg.ActiveGroup
if len(groups) > 0 {
groupIDs := datautil.Slice(groups, func(e *relation.GroupCount) string { return e.GroupID })
groupIDs := datautil.Slice(groups, func(e *model.GroupCount) string { return e.GroupID })
resp, err := m.GroupLocalCache.GetGroupInfos(ctx, groupIDs)
if err != nil {
return nil, err

@ -17,10 +17,10 @@ package third
import (
"context"
"crypto/rand"
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/third"
@ -45,11 +45,11 @@ func genLogID() string {
}

func (t *thirdServer) UploadLogs(ctx context.Context, req *third.UploadLogsReq) (*third.UploadLogsResp, error) {
var dbLogs []*relationtb.LogModel
var dbLogs []*relationtb.Log
userID := ctx.Value(constant.OpUserID).(string)
platform := constant.PlatformID2Name[int(req.Platform)]
for _, fileURL := range req.FileURLs {
log := relationtb.LogModel{
log := relationtb.Log{
Version: req.Version,
SystemType: req.SystemType,
Platform: platform,
@ -70,7 +70,7 @@ func (t *thirdServer) UploadLogs(ctx context.Context, req *third.UploadLogsReq)
}
}
if log.LogID == "" {
return nil, servererrs.ErrData.WrapMsg("LogModel id gen error")
return nil, servererrs.ErrData.WrapMsg("Log id gen error")
}
dbLogs = append(dbLogs, &log)
}
@ -105,8 +105,8 @@ func (t *thirdServer) DeleteLogs(ctx context.Context, req *third.DeleteLogsReq)
return &third.DeleteLogsResp{}, nil
}

func dbToPbLogInfos(logs []*relationtb.LogModel) []*third.LogInfo {
db2pbForLogInfo := func(log *relationtb.LogModel) *third.LogInfo {
func dbToPbLogInfos(logs []*relationtb.Log) []*third.LogInfo {
db2pbForLogInfo := func(log *relationtb.Log) *third.LogInfo {
return &third.LogInfo{
Filename: log.FileName,
UserID: log.UserID,

@ -19,12 +19,12 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"path"
"strconv"
"time"

"github.com/google/uuid"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/protocol/third"
"github.com/openimsdk/tools/errs"
@ -60,7 +60,7 @@ func (t *thirdServer) InitiateMultipartUpload(ctx context.Context, req *third.In
result, err := t.s3dataBase.InitiateMultipartUpload(ctx, req.Hash, req.Size, t.defaultExpire, int(req.MaxParts))
if err != nil {
if haErr, ok := errs.Unwrap(err).(*cont.HashAlreadyExistsError); ok {
obj := &relation.ObjectModel{
obj := &model.Object{
Name: req.Name,
UserID: mcontext.GetOpUserID(ctx),
Hash: req.Hash,
@ -137,7 +137,7 @@ func (t *thirdServer) CompleteMultipartUpload(ctx context.Context, req *third.Co
if err != nil {
return nil, err
}
obj := &relation.ObjectModel{
obj := &model.Object{
Name: req.Name,
UserID: mcontext.GetOpUserID(ctx),
Hash: result.Hash,
@ -263,7 +263,7 @@ func (t *thirdServer) CompleteFormData(ctx context.Context, req *third.CompleteF
if info.Size > 0 && info.Size != mate.Size {
return nil, servererrs.ErrData.WrapMsg("file size mismatch")
}
obj := &relation.ObjectModel{
obj := &model.Object{
Name: mate.Name,
UserID: mcontext.GetOpUserID(ctx),
Hash: "etag_" + info.ETag,

@ -18,11 +18,12 @@ import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/protocol/third"
"github.com/openimsdk/tools/db/mongoutil"
@ -75,7 +76,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
var o s3.Interface
switch enable {
case "minio":
o, err = minio.NewMinio(ctx, cache.NewMinioCache(rdb), *config.MinioConfig.Build())
o, err = minio.NewMinio(ctx, redis.NewMinioCache(rdb), *config.MinioConfig.Build())
case "cos":
o, err = cos.NewCos(*config.RpcConfig.Object.Cos.Build())
case "oss":
@ -86,9 +87,9 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
if err != nil {
return err
}
cache.InitLocalCache(&config.LocalCacheConfig)
localcache.InitLocalCache(&config.LocalCacheConfig)
third.RegisterThirdServer(server, &thirdServer{
thirdDatabase: controller.NewThirdDatabase(cache.NewThirdCache(rdb), logdb),
thirdDatabase: controller.NewThirdDatabase(redis.NewThirdCache(rdb), logdb),
userRpcClient: rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID),
s3dataBase: controller.NewS3Database(rdb, o, s3db),
defaultExpire: time.Hour * 24 * 7,

@ -16,10 +16,10 @@ package user
|
||||
|
||||
import (
|
||||
"context"
|
||||
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
|
||||
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
|
||||
"github.com/openimsdk/protocol/constant"
|
||||
"github.com/openimsdk/protocol/sdkws"
|
||||
@ -41,7 +41,7 @@ func WithUserDB(db controller.UserDatabase) userNotificationSenderOptions {
|
||||
}
|
||||
|
||||
func WithUserFunc(
|
||||
fn func(ctx context.Context, userIDs []string) (users []*relationtb.UserModel, err error),
|
||||
fn func(ctx context.Context, userIDs []string) (users []*relationtb.User, err error),
|
||||
) userNotificationSenderOptions {
|
||||
return func(u *UserNotificationSender) {
|
||||
f := func(ctx context.Context, userIDs []string) (result []notification.CommonUser, err error) {
|
||||
|
||||
@ -18,7 +18,11 @@ import (
"context"
"github.com/openimsdk/open-im-server/v3/internal/rpc/friend"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/tools/db/redisutil"
"math/rand"
"strings"
@ -26,12 +30,8 @@ import (

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/sdkws"
@ -77,22 +77,22 @@ func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegi
if err != nil {
return err
}
users := make([]*tablerelation.UserModel, 0)
users := make([]*tablerelation.User, 0)

for _, v := range config.Share.IMAdminUserID {
users = append(users, &tablerelation.UserModel{UserID: v, Nickname: v, AppMangerLevel: constant.AppNotificationAdmin})
users = append(users, &tablerelation.User{UserID: v, Nickname: v, AppMangerLevel: constant.AppNotificationAdmin})
}
userDB, err := mgo.NewUserMongo(mgocli.GetDB())
if err != nil {
return err
}
userCache := cache.NewUserCacheRedis(rdb, &config.LocalCacheConfig, userDB, cache.GetDefaultOpt())
userCache := redis.NewUserCacheRedis(rdb, &config.LocalCacheConfig, userDB, redis.GetRocksCacheOptions())
userMongoDB := mgo.NewUserMongoDriver(mgocli.GetDB())
database := controller.NewUserDatabase(userDB, userCache, mgocli.GetTx(), userMongoDB)
friendRpcClient := rpcclient.NewFriendRpcClient(client, config.Share.RpcRegisterName.Friend)
groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg)
cache.InitLocalCache(&config.LocalCacheConfig)
localcache.InitLocalCache(&config.LocalCacheConfig)
u := &userServer{
db: database,
RegisterCenter: client,
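
The wiring order in this Start is: Mongo-backed user store first, then the rockscache/Redis cache wrapping it (redis.NewUserCacheRedis with redis.GetRocksCacheOptions()), then the controller that the RPC handlers use. A hedged sketch of that read-through layering with stand-in types, not the server's actual interfaces:

package main

import "fmt"

// userStore stands in for the Mongo-backed user collection.
type userStore interface {
	Take(userID string) (string, error)
}

type mongoStore struct{}

func (mongoStore) Take(userID string) (string, error) { return "user:" + userID, nil }

// cachedStore wraps a userStore the way the Redis cache wraps the Mongo driver:
// serve hits from the cache, fall back to the store and remember the result.
type cachedStore struct {
	next  userStore
	cache map[string]string
}

func (c *cachedStore) Take(userID string) (string, error) {
	if v, ok := c.cache[userID]; ok {
		return v, nil
	}
	v, err := c.next.Take(userID)
	if err != nil {
		return "", err
	}
	c.cache[userID] = v
	return v, nil
}

func main() {
	db := &cachedStore{next: mongoStore{}, cache: map[string]string{}}
	v, _ := db.Take("u1")
	fmt.Println(v) // user:u1
}
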
@ -281,9 +281,9 @@ func (s *userServer) UserRegister(ctx context.Context, req *pbuser.UserRegisterR
return nil, err
}
now := time.Now()
users := make([]*tablerelation.UserModel, 0, len(req.Users))
users := make([]*tablerelation.User, 0, len(req.Users))
for _, user := range req.Users {
users = append(users, &tablerelation.UserModel{
users = append(users, &tablerelation.User{
UserID: user.UserID,
Nickname: user.Nickname,
FaceURL: user.FaceURL,
@ -403,7 +403,7 @@ func (s *userServer) ProcessUserCommandAdd(ctx context.Context, req *pbuser.Proc
if req.Ex != nil {
value = req.Ex.Value
}
// Assuming you have a method in s.db to add a user command
// Assuming you have a method in s.storage to add a user command
err = s.db.AddUserCommand(ctx, req.UserID, req.Type, req.Uuid, value, ex)
if err != nil {
return nil, err
@ -451,7 +451,7 @@ func (s *userServer) ProcessUserCommandUpdate(ctx context.Context, req *pbuser.P
val["ex"] = req.Ex.Value
}

// Assuming you have a method in s.db to update a user command
// Assuming you have a method in s.storage to update a user command
err = s.db.UpdateUserCommand(ctx, req.UserID, req.Type, req.Uuid, val)
if err != nil {
return nil, err
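
ProcessUserCommandUpdate assembles a partial-update map (only fields present in the request) before calling UpdateUserCommand. A tiny self-contained sketch of that shape; the field names here are illustrative:

package main

import "fmt"

// buildUserCommandUpdate mirrors how the handler assembles a partial update:
// only fields supplied in the request end up in the map handed to the storage
// layer.
func buildUserCommandUpdate(value, ex *string) map[string]any {
	val := make(map[string]any)
	if value != nil {
		val["value"] = *value
	}
	if ex != nil {
		val["ex"] = *ex
	}
	return val
}

func main() {
	ex := "extra"
	fmt.Println(buildUserCommandUpdate(nil, &ex)) // map[ex:extra]
}
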
@ -548,14 +548,14 @@ func (s *userServer) AddNotificationAccount(ctx context.Context, req *pbuser.Add
}
}

user := &tablerelation.UserModel{
user := &tablerelation.User{
UserID: req.UserID,
Nickname: req.NickName,
FaceURL: req.FaceURL,
CreateTime: time.Now(),
AppMangerLevel: constant.AppNotificationAdmin,
}
if err := s.db.Create(ctx, []*tablerelation.UserModel{user}); err != nil {
if err := s.db.Create(ctx, []*tablerelation.User{user}); err != nil {
return nil, err
}

@ -598,7 +598,7 @@ func (s *userServer) SearchNotificationAccount(ctx context.Context, req *pbuser.
return nil, err
}

var users []*relation.UserModel
var users []*tablerelation.User
var err error

// If a keyword is provided in the request
@ -664,7 +664,7 @@ func (s *userServer) genUserID() string {
return string(data)
}

func (s *userServer) userModelToResp(users []*relation.UserModel, pagination pagination.Pagination) *pbuser.SearchNotificationAccountResp {
func (s *userServer) userModelToResp(users []*tablerelation.User, pagination pagination.Pagination) *pbuser.SearchNotificationAccountResp {
accounts := make([]*pbuser.NotificationAccountInfo, 0)
var total int64
for _, v := range users {

@ -323,13 +323,12 @@ type User struct {
}

type Redis struct {
Address []string `mapstructure:"address"`
Username string `mapstructure:"username"`
Password string `mapstructure:"password"`
EnablePipeline bool `mapstructure:"enablePipeline"`
ClusterMode bool `mapstructure:"clusterMode"`
DB int `mapstructure:"db"`
MaxRetry int `mapstructure:"MaxRetry"`
Address []string `mapstructure:"address"`
Username string `mapstructure:"username"`
Password string `mapstructure:"password"`
ClusterMode bool `mapstructure:"clusterMode"`
DB int `mapstructure:"storage"`
MaxRetry int `mapstructure:"MaxRetry"`
}

type BeforeConfig struct {

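The Redis block is decoded through mapstructure tags. A minimal decode sketch using github.com/mitchellh/mapstructure; the tags and input values below are illustrative and the server's own loader may differ (note the diff above also changes the DB tag, so don't treat this sketch as the exact mapping):

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// redisConf is a sketch of the struct above; the tags here are illustrative.
type redisConf struct {
	Address     []string `mapstructure:"address"`
	Username    string   `mapstructure:"username"`
	Password    string   `mapstructure:"password"`
	ClusterMode bool     `mapstructure:"clusterMode"`
	DB          int      `mapstructure:"db"`
	MaxRetry    int      `mapstructure:"MaxRetry"`
}

func main() {
	// Hypothetical values, as they might appear after parsing a redis.yml into a map.
	in := map[string]any{
		"address":     []string{"127.0.0.1:6379"},
		"clusterMode": false,
		"db":          0,
		"MaxRetry":    10,
	}
	var c redisConf
	if err := mapstructure.Decode(in, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c)
}
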
@ -16,13 +16,13 @@ package convert

import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"

"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/protocol/sdkws"
sdk "github.com/openimsdk/protocol/sdkws"
)

func BlackDB2Pb(ctx context.Context, blackDBs []*relation.BlackModel, f func(ctx context.Context, userIDs []string) (map[string]*sdkws.UserInfo, error)) (blackPbs []*sdk.BlackInfo, err error) {
func BlackDB2Pb(ctx context.Context, blackDBs []*model.Black, f func(ctx context.Context, userIDs []string) (map[string]*sdkws.UserInfo, error)) (blackPbs []*sdk.BlackInfo, err error) {
if len(blackDBs) == 0 {
return nil, nil
}

@ -15,12 +15,12 @@
package convert

import (
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/protocol/conversation"
"github.com/openimsdk/tools/utils/datautil"
)

func ConversationDB2Pb(conversationDB *relation.ConversationModel) *conversation.Conversation {
func ConversationDB2Pb(conversationDB *model.Conversation) *conversation.Conversation {
conversationPB := &conversation.Conversation{}
conversationPB.LatestMsgDestructTime = conversationDB.LatestMsgDestructTime.Unix()
if err := datautil.CopyStructFields(conversationPB, conversationDB); err != nil {
@ -29,7 +29,7 @@ func ConversationDB2Pb(conversationDB *relation.ConversationModel) *conversation
return conversationPB
}

func ConversationsDB2Pb(conversationsDB []*relation.ConversationModel) (conversationsPB []*conversation.Conversation) {
func ConversationsDB2Pb(conversationsDB []*model.Conversation) (conversationsPB []*conversation.Conversation) {
for _, conversationDB := range conversationsDB {
conversationPB := &conversation.Conversation{}
if err := datautil.CopyStructFields(conversationPB, conversationDB); err != nil {
@ -41,17 +41,17 @@ func ConversationsDB2Pb(conversationsDB []*relation.ConversationModel) (conversa
return conversationsPB
}

func ConversationPb2DB(conversationPB *conversation.Conversation) *relation.ConversationModel {
conversationDB := &relation.ConversationModel{}
func ConversationPb2DB(conversationPB *conversation.Conversation) *model.Conversation {
conversationDB := &model.Conversation{}
if err := datautil.CopyStructFields(conversationDB, conversationPB); err != nil {
return nil
}
return conversationDB
}

func ConversationsPb2DB(conversationsPB []*conversation.Conversation) (conversationsDB []*relation.ConversationModel) {
func ConversationsPb2DB(conversationsPB []*conversation.Conversation) (conversationsDB []*model.Conversation) {
for _, conversationPB := range conversationsPB {
conversationDB := &relation.ConversationModel{}
conversationDB := &model.Conversation{}
if err := datautil.CopyStructFields(conversationDB, conversationPB); err != nil {
continue
}

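These converters copy matching fields between the storage model and the protobuf struct, with time fields special-cased (LatestMsgDestructTime becomes a unix timestamp). A self-contained sketch of that converter shape with local stand-in types, not the datautil.CopyStructFields implementation:

package main

import (
	"fmt"
	"time"
)

// Local stand-ins for the storage model and the protobuf message.
type dbConversation struct {
	ConversationID        string
	RecvMsgOpt            int32
	LatestMsgDestructTime time.Time
}

type pbConversation struct {
	ConversationID        string
	RecvMsgOpt            int32
	LatestMsgDestructTime int64 // the wire format carries a unix timestamp
}

// conversationDB2Pb mirrors the converter shape above: copy plain fields,
// then translate time.Time into the unix value.
func conversationDB2Pb(in *dbConversation) *pbConversation {
	return &pbConversation{
		ConversationID:        in.ConversationID,
		RecvMsgOpt:            in.RecvMsgOpt,
		LatestMsgDestructTime: in.LatestMsgDestructTime.Unix(),
	}
}

func main() {
	pb := conversationDB2Pb(&dbConversation{ConversationID: "c1", LatestMsgDestructTime: time.Now()})
	fmt.Println(pb.ConversationID, pb.LatestMsgDestructTime)
}
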
@ -17,15 +17,15 @@ package convert
import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"

"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/timeutil"
)

func FriendPb2DB(friend *sdkws.FriendInfo) *relation.FriendModel {
dbFriend := &relation.FriendModel{}
func FriendPb2DB(friend *sdkws.FriendInfo) *model.Friend {
dbFriend := &model.Friend{}
err := datautil.CopyStructFields(dbFriend, friend)
if err != nil {
return nil
@ -35,7 +35,7 @@ func FriendPb2DB(friend *sdkws.FriendInfo) *relation.FriendModel {
return dbFriend
}

func FriendDB2Pb(ctx context.Context, friendDB *relation.FriendModel,
func FriendDB2Pb(ctx context.Context, friendDB *model.Friend,
getUsers func(ctx context.Context, userIDs []string) (map[string]*sdkws.UserInfo, error),
) (*sdkws.FriendInfo, error) {
users, err := getUsers(ctx, []string{friendDB.FriendUserID})
@ -55,7 +55,7 @@ func FriendDB2Pb(ctx context.Context, friendDB *relation.FriendModel,

func FriendsDB2Pb(
ctx context.Context,
friendsDB []*relation.FriendModel,
friendsDB []*model.Friend,
getUsers func(ctx context.Context, userIDs []string) (map[string]*sdkws.UserInfo, error),
) (friendsPb []*sdkws.FriendInfo, err error) {
if len(friendsDB) == 0 {
@ -89,7 +89,7 @@ func FriendsDB2Pb(

}

func FriendRequestDB2Pb(ctx context.Context, friendRequests []*relation.FriendRequestModel, getUsers func(ctx context.Context, userIDs []string) (map[string]*sdkws.UserInfo, error)) ([]*sdkws.FriendRequest, error) {
func FriendRequestDB2Pb(ctx context.Context, friendRequests []*model.FriendRequest, getUsers func(ctx context.Context, userIDs []string) (map[string]*sdkws.UserInfo, error)) ([]*sdkws.FriendRequest, error) {
if len(friendRequests) == 0 {
return nil, nil
}
@ -134,8 +134,8 @@ func FriendPb2DBMap(friend *sdkws.FriendInfo) map[string]any {

val := make(map[string]any)

// Assuming FriendInfo has similar fields to those in FriendModel.
// Add or remove fields based on your actual FriendInfo and FriendModel structures.
// Assuming FriendInfo has similar fields to those in Friend.
// Add or remove fields based on your actual FriendInfo and Friend structures.
if friend.FriendUser != nil {
if friend.FriendUser.UserID != "" {
val["friend_user_id"] = friend.FriendUser.UserID

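FriendsDB2Pb and FriendRequestDB2Pb take a getUsers callback so nicknames and avatars are resolved in one batched lookup rather than per row. A sketch of that join pattern with invented types:

package main

import (
	"context"
	"fmt"
)

type userInfo struct{ UserID, Nickname string }

type friendRow struct{ OwnerUserID, FriendUserID string }

type friendInfo struct {
	OwnerUserID string
	Friend      *userInfo
}

// friendsDB2Pb mirrors the converter's shape: collect IDs, do one batched
// lookup through the callback, then join rows against the returned map.
func friendsDB2Pb(ctx context.Context, rows []*friendRow,
	getUsers func(ctx context.Context, userIDs []string) (map[string]*userInfo, error)) ([]*friendInfo, error) {
	ids := make([]string, 0, len(rows))
	for _, r := range rows {
		ids = append(ids, r.FriendUserID)
	}
	users, err := getUsers(ctx, ids)
	if err != nil {
		return nil, err
	}
	out := make([]*friendInfo, 0, len(rows))
	for _, r := range rows {
		out = append(out, &friendInfo{OwnerUserID: r.OwnerUserID, Friend: users[r.FriendUserID]})
	}
	return out, nil
}

func main() {
	lookup := func(_ context.Context, userIDs []string) (map[string]*userInfo, error) {
		m := make(map[string]*userInfo, len(userIDs))
		for _, id := range userIDs {
			m[id] = &userInfo{UserID: id, Nickname: "nick-" + id}
		}
		return m, nil
	}
	res, _ := friendsDB2Pb(context.Background(), []*friendRow{{OwnerUserID: "a", FriendUserID: "b"}}, lookup)
	fmt.Println(res[0].Friend.Nickname) // nick-b
}
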
@ -15,14 +15,14 @@
package convert

import (
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
pbgroup "github.com/openimsdk/protocol/group"
sdkws "github.com/openimsdk/protocol/sdkws"
)

func Db2PbGroupInfo(m *relation.GroupModel, ownerUserID string, memberCount uint32) *sdkws.GroupInfo {
func Db2PbGroupInfo(m *model.Group, ownerUserID string, memberCount uint32) *sdkws.GroupInfo {
return &sdkws.GroupInfo{
GroupID: m.GroupID,
GroupName: m.GroupName,
@ -44,8 +44,8 @@ func Db2PbGroupInfo(m *relation.GroupModel, ownerUserID string, memberCount uint
}
}

func Pb2DbGroupRequest(req *pbgroup.GroupApplicationResponseReq, handleUserID string) *relation.GroupRequestModel {
return &relation.GroupRequestModel{
func Pb2DbGroupRequest(req *pbgroup.GroupApplicationResponseReq, handleUserID string) *model.GroupRequest {
return &model.GroupRequest{
UserID: req.FromUserID,
GroupID: req.GroupID,
HandleResult: req.HandleResult,
@ -55,7 +55,7 @@ func Pb2DbGroupRequest(req *pbgroup.GroupApplicationResponseReq, handleUserID st
}
}

func Db2PbCMSGroup(m *relation.GroupModel, ownerUserID string, ownerUserName string, memberCount uint32) *pbgroup.CMSGroup {
func Db2PbCMSGroup(m *model.Group, ownerUserID string, ownerUserName string, memberCount uint32) *pbgroup.CMSGroup {
return &pbgroup.CMSGroup{
GroupInfo: Db2PbGroupInfo(m, ownerUserID, memberCount),
GroupOwnerUserID: ownerUserID,
@ -63,7 +63,7 @@ func Db2PbCMSGroup(m *relation.GroupModel, ownerUserID string, ownerUserName str
}
}

func Db2PbGroupMember(m *relation.GroupMemberModel) *sdkws.GroupMemberFullInfo {
func Db2PbGroupMember(m *model.GroupMember) *sdkws.GroupMemberFullInfo {
return &sdkws.GroupMemberFullInfo{
GroupID: m.GroupID,
UserID: m.UserID,
@ -80,7 +80,7 @@ func Db2PbGroupMember(m *relation.GroupMemberModel) *sdkws.GroupMemberFullInfo {
}
}

func Db2PbGroupRequest(m *relation.GroupRequestModel, user *sdkws.PublicUserInfo, group *sdkws.GroupInfo) *sdkws.GroupRequest {
func Db2PbGroupRequest(m *model.GroupRequest, user *sdkws.PublicUserInfo, group *sdkws.GroupInfo) *sdkws.GroupRequest {
return &sdkws.GroupRequest{
UserInfo: user,
GroupInfo: group,
@ -108,8 +108,8 @@ func Db2PbGroupAbstractInfo(
}
}

func Pb2DBGroupInfo(m *sdkws.GroupInfo) *relation.GroupModel {
return &relation.GroupModel{
func Pb2DBGroupInfo(m *sdkws.GroupInfo) *model.Group {
return &model.Group{
GroupID: m.GroupID,
GroupName: m.GroupName,
Notification: m.Notification,
@ -128,8 +128,8 @@ func Pb2DBGroupInfo(m *sdkws.GroupInfo) *relation.GroupModel {
}
}

// func Pb2DbGroupMember(m *sdkws.UserInfo) *relation.GroupMemberModel {
// return &relation.GroupMemberModel{
// func Pb2DbGroupMember(m *sdkws.UserInfo) *relation.GroupMember {
// return &relation.GroupMember{
// UserID: m.UserID,
// Nickname: m.Nickname,
// FaceURL: m.FaceURL,

@ -15,16 +15,16 @@
package convert

import (
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/sdkws"
)

func MsgPb2DB(msg *sdkws.MsgData) *relation.MsgDataModel {
func MsgPb2DB(msg *sdkws.MsgData) *model.MsgDataModel {
if msg == nil {
return nil
}
var msgDataModel relation.MsgDataModel
var msgDataModel model.MsgDataModel
msgDataModel.SendID = msg.SendID
msgDataModel.RecvID = msg.RecvID
msgDataModel.GroupID = msg.GroupID
@ -43,7 +43,7 @@ func MsgPb2DB(msg *sdkws.MsgData) *relation.MsgDataModel {
msgDataModel.Status = msg.Status
msgDataModel.Options = msg.Options
if msg.OfflinePushInfo != nil {
msgDataModel.OfflinePush = &relation.OfflinePushModel{
msgDataModel.OfflinePush = &model.OfflinePushModel{
Title: msg.OfflinePushInfo.Title,
Desc: msg.OfflinePushInfo.Desc,
Ex: msg.OfflinePushInfo.Ex,
@ -57,7 +57,7 @@ func MsgPb2DB(msg *sdkws.MsgData) *relation.MsgDataModel {
return &msgDataModel
}

func MsgDB2Pb(msgModel *relation.MsgDataModel) *sdkws.MsgData {
func MsgDB2Pb(msgModel *model.MsgDataModel) *sdkws.MsgData {
if msgModel == nil {
return nil
}

@ -15,13 +15,13 @@
package convert

import (
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"time"

relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/protocol/sdkws"
)

func UsersDB2Pb(users []*relationtb.UserModel) []*sdkws.UserInfo {
func UsersDB2Pb(users []*relationtb.User) []*sdkws.UserInfo {
result := make([]*sdkws.UserInfo, 0, len(users))
for _, user := range users {
userPb := &sdkws.UserInfo{
@ -38,8 +38,8 @@ func UsersDB2Pb(users []*relationtb.UserModel) []*sdkws.UserInfo {
return result
}

func UserPb2DB(user *sdkws.UserInfo) *relationtb.UserModel {
return &relationtb.UserModel{
func UserPb2DB(user *sdkws.UserInfo) *relationtb.User {
return &relationtb.User{
UserID: user.UserID,
Nickname: user.Nickname,
FaceURL: user.FaceURL,

@ -15,17 +15,16 @@
package convert

import (
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"reflect"
"testing"

"github.com/openimsdk/protocol/sdkws"

relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
)

func TestUsersDB2Pb(t *testing.T) {
type args struct {
users []*relationtb.UserModel
users []*relationtb.User
}
tests := []struct {
name string
@ -50,7 +49,7 @@ func TestUserPb2DB(t *testing.T) {
tests := []struct {
name string
args args
want *relationtb.UserModel
want *relationtb.User
}{
// TODO: Add test cases.
}

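The test file above keeps empty table-driven scaffolding ("// TODO: Add test cases."). A filled-in example of one such case, using stand-in types rather than the real relationtb.User and sdkws.UserInfo; save it as a _test.go file and run it with go test:

package main

import (
	"reflect"
	"testing"
)

type user struct{ UserID, Nickname string }

type userInfoPb struct{ UserID, Nickname string }

func usersDB2Pb(users []*user) []*userInfoPb {
	out := make([]*userInfoPb, 0, len(users))
	for _, u := range users {
		out = append(out, &userInfoPb{UserID: u.UserID, Nickname: u.Nickname})
	}
	return out
}

// TestUsersDB2Pb shows the kind of entry the empty table slots above are meant to hold.
func TestUsersDB2Pb(t *testing.T) {
	tests := []struct {
		name string
		in   []*user
		want []*userInfoPb
	}{
		{name: "one user", in: []*user{{UserID: "u1", Nickname: "n1"}}, want: []*userInfoPb{{UserID: "u1", Nickname: "n1"}}},
		{name: "empty", in: nil, want: []*userInfoPb{}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := usersDB2Pb(tt.in); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("usersDB2Pb() = %v, want %v", got, tt.want)
			}
		})
	}
}
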
371
pkg/common/db/cache/conversation.go
vendored
371
pkg/common/db/cache/conversation.go
vendored
@ -1,371 +0,0 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/dtm-labs/rockscache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/cachekey"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
"github.com/openimsdk/tools/utils/encrypt"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
const (
|
||||
// ConversationKey = "CONVERSATION:"
|
||||
// conversationIDsKey = "CONVERSATION_IDS:"
|
||||
// conversationIDsHashKey = "CONVERSATION_IDS_HASH:"
|
||||
// conversationHasReadSeqKey = "CONVERSATION_HAS_READ_SEQ:"
|
||||
// recvMsgOptKey = "RECV_MSG_OPT:"
|
||||
// superGroupRecvMsgNotNotifyUserIDsKey = "SUPER_GROUP_RECV_MSG_NOT_NOTIFY_USER_IDS:"
|
||||
// superGroupRecvMsgNotNotifyUserIDsHashKey = "SUPER_GROUP_RECV_MSG_NOT_NOTIFY_USER_IDS_HASH:"
|
||||
// conversationNotReceiveMessageUserIDsKey = "CONVERSATION_NOT_RECEIVE_MESSAGE_USER_IDS:".
|
||||
|
||||
conversationExpireTime = time.Second * 60 * 60 * 12
|
||||
)
|
||||
|
||||
// arg fn will exec when no data in msgCache.
|
||||
type ConversationCache interface {
|
||||
metaCache
|
||||
NewCache() ConversationCache
|
||||
// get user's conversationIDs from msgCache
|
||||
GetUserConversationIDs(ctx context.Context, ownerUserID string) ([]string, error)
|
||||
DelConversationIDs(userIDs ...string) ConversationCache
|
||||
|
||||
GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error)
|
||||
DelUserConversationIDsHash(ownerUserIDs ...string) ConversationCache
|
||||
|
||||
// get one conversation from msgCache
|
||||
GetConversation(ctx context.Context, ownerUserID, conversationID string) (*relationtb.ConversationModel, error)
|
||||
DelConversations(ownerUserID string, conversationIDs ...string) ConversationCache
|
||||
DelUsersConversation(conversationID string, ownerUserIDs ...string) ConversationCache
|
||||
// get one conversation from msgCache
|
||||
GetConversations(ctx context.Context, ownerUserID string,
|
||||
conversationIDs []string) ([]*relationtb.ConversationModel, error)
|
||||
// get one user's all conversations from msgCache
|
||||
GetUserAllConversations(ctx context.Context, ownerUserID string) ([]*relationtb.ConversationModel, error)
|
||||
// get user conversation recv msg from msgCache
|
||||
GetUserRecvMsgOpt(ctx context.Context, ownerUserID, conversationID string) (opt int, err error)
|
||||
DelUserRecvMsgOpt(ownerUserID, conversationID string) ConversationCache
|
||||
// get one super group recv msg but do not notification userID list
|
||||
// GetSuperGroupRecvMsgNotNotifyUserIDs(ctx context.Context, groupID string) (userIDs []string, err error)
|
||||
DelSuperGroupRecvMsgNotNotifyUserIDs(groupID string) ConversationCache
|
||||
// get one super group recv msg but do not notification userID list hash
|
||||
// GetSuperGroupRecvMsgNotNotifyUserIDsHash(ctx context.Context, groupID string) (hash uint64, err error)
|
||||
DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID string) ConversationCache
|
||||
|
||||
// GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error)
|
||||
DelUserAllHasReadSeqs(ownerUserID string, conversationIDs ...string) ConversationCache
|
||||
|
||||
GetConversationsByConversationID(ctx context.Context,
|
||||
conversationIDs []string) ([]*relationtb.ConversationModel, error)
|
||||
DelConversationByConversationID(conversationIDs ...string) ConversationCache
|
||||
GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error)
|
||||
DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) ConversationCache
|
||||
}
|
||||
|
||||
func NewConversationRedis(rdb redis.UniversalClient, localCache *config.LocalCache, opts rockscache.Options, db relationtb.ConversationModelInterface) ConversationCache {
|
||||
rcClient := rockscache.NewClient(rdb, opts)
|
||||
mc := NewMetaCacheRedis(rcClient)
|
||||
c := localCache.Conversation
|
||||
log.ZDebug(context.Background(), "black local cache init", "Topic", c.Topic, "SlotNum", c.SlotNum, "SlotSize", c.SlotSize, "enable", c.Enable())
|
||||
mc.SetTopic(c.Topic)
|
||||
mc.SetRawRedisClient(rdb)
|
||||
return &ConversationRedisCache{
|
||||
rcClient: rcClient,
|
||||
metaCache: mc,
|
||||
conversationDB: db,
|
||||
expireTime: conversationExpireTime,
|
||||
}
|
||||
}
|
||||
|
||||
type ConversationRedisCache struct {
|
||||
metaCache
|
||||
rcClient *rockscache.Client
|
||||
conversationDB relationtb.ConversationModelInterface
|
||||
expireTime time.Duration
|
||||
}
|
||||
|
||||
// func NewNewConversationRedis(
|
||||
// rdb redis.UniversalClient,
|
||||
// conversationDB *relation.ConversationGorm,
|
||||
// options rockscache.Options,
|
||||
// ) ConversationCache {
|
||||
// rcClient := rockscache.NewClient(rdb, options)
|
||||
//
|
||||
// return &ConversationRedisCache{
|
||||
// rcClient: rcClient,
|
||||
// metaCache: NewMetaCacheRedis(rcClient),
|
||||
// conversationDB: conversationDB,
|
||||
// expireTime: conversationExpireTime,
|
||||
// }
|
||||
//}
|
||||
|
||||
func (c *ConversationRedisCache) NewCache() ConversationCache {
|
||||
return &ConversationRedisCache{
|
||||
rcClient: c.rcClient,
|
||||
metaCache: c.Copy(),
|
||||
conversationDB: c.conversationDB,
|
||||
expireTime: c.expireTime,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getConversationKey(ownerUserID, conversationID string) string {
|
||||
return cachekey.GetConversationKey(ownerUserID, conversationID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getConversationIDsKey(ownerUserID string) string {
|
||||
return cachekey.GetConversationIDsKey(ownerUserID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getSuperGroupRecvNotNotifyUserIDsKey(groupID string) string {
|
||||
return cachekey.GetSuperGroupRecvNotNotifyUserIDsKey(groupID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getRecvMsgOptKey(ownerUserID, conversationID string) string {
|
||||
return cachekey.GetRecvMsgOptKey(ownerUserID, conversationID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getSuperGroupRecvNotNotifyUserIDsHashKey(groupID string) string {
|
||||
return cachekey.GetSuperGroupRecvNotNotifyUserIDsHashKey(groupID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getConversationHasReadSeqKey(ownerUserID, conversationID string) string {
|
||||
return cachekey.GetConversationHasReadSeqKey(ownerUserID, conversationID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getConversationNotReceiveMessageUserIDsKey(conversationID string) string {
|
||||
return cachekey.GetConversationNotReceiveMessageUserIDsKey(conversationID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getUserConversationIDsHashKey(ownerUserID string) string {
|
||||
return cachekey.GetUserConversationIDsHashKey(ownerUserID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetUserConversationIDs(ctx context.Context, ownerUserID string) ([]string, error) {
|
||||
return getCache(ctx, c.rcClient, c.getConversationIDsKey(ownerUserID), c.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
return c.conversationDB.FindUserIDAllConversationID(ctx, ownerUserID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelConversationIDs(userIDs ...string) ConversationCache {
|
||||
keys := make([]string, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, c.getConversationIDsKey(userID))
|
||||
}
|
||||
cache := c.NewCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
c.rcClient,
|
||||
c.getUserConversationIDsHashKey(ownerUserID),
|
||||
c.expireTime,
|
||||
func(ctx context.Context) (uint64, error) {
|
||||
conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
datautil.Sort(conversationIDs, true)
|
||||
bi := big.NewInt(0)
|
||||
bi.SetString(encrypt.Md5(strings.Join(conversationIDs, ";"))[0:8], 16)
|
||||
return bi.Uint64(), nil
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelUserConversationIDsHash(ownerUserIDs ...string) ConversationCache {
|
||||
keys := make([]string, 0, len(ownerUserIDs))
|
||||
for _, ownerUserID := range ownerUserIDs {
|
||||
keys = append(keys, c.getUserConversationIDsHashKey(ownerUserID))
|
||||
}
|
||||
cache := c.NewCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetConversation(ctx context.Context, ownerUserID, conversationID string) (*relationtb.ConversationModel, error) {
|
||||
return getCache(ctx, c.rcClient, c.getConversationKey(ownerUserID, conversationID), c.expireTime, func(ctx context.Context) (*relationtb.ConversationModel, error) {
|
||||
return c.conversationDB.Take(ctx, ownerUserID, conversationID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelConversations(ownerUserID string, conversationIDs ...string) ConversationCache {
|
||||
keys := make([]string, 0, len(conversationIDs))
|
||||
for _, conversationID := range conversationIDs {
|
||||
keys = append(keys, c.getConversationKey(ownerUserID, conversationID))
|
||||
}
|
||||
cache := c.NewCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
// func (c *ConversationRedisCache) getConversationIndex(convsation *relationtb.ConversationModel, keys []string) (int, error) {
|
||||
// key := c.getConversationKey(convsation.OwnerUserID, convsation.ConversationID)
|
||||
// for _i, _key := range keys {
|
||||
// if _key == key {
|
||||
// return _i, nil
|
||||
// }
|
||||
// }
|
||||
|
||||
// return 0, errs.New("not found key:" + key + " in keys")
|
||||
// }
|
||||
|
||||
func (c *ConversationRedisCache) GetConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*relationtb.ConversationModel, error) {
|
||||
// var keys []string
|
||||
// for _, conversarionID := range conversationIDs {
|
||||
// keys = append(keys, c.getConversationKey(ownerUserID, conversarionID))
|
||||
//}
|
||||
// return batchGetCache(
|
||||
// ctx,
|
||||
// c.rcClient,
|
||||
// keys,
|
||||
// c.expireTime,
|
||||
// c.getConversationIndex,
|
||||
// func(ctx context.Context) ([]*relationtb.ConversationModel, error) {
|
||||
// return c.conversationDB.Find(ctx, ownerUserID, conversationIDs)
|
||||
// },
|
||||
//)
|
||||
return batchGetCache2(ctx, c.rcClient, c.expireTime, conversationIDs, func(conversationID string) string {
|
||||
return c.getConversationKey(ownerUserID, conversationID)
|
||||
}, func(ctx context.Context, conversationID string) (*relationtb.ConversationModel, error) {
|
||||
return c.conversationDB.Take(ctx, ownerUserID, conversationID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetUserAllConversations(ctx context.Context, ownerUserID string) ([]*relationtb.ConversationModel, error) {
|
||||
conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// var keys []string
|
||||
// for _, conversarionID := range conversationIDs {
|
||||
// keys = append(keys, c.getConversationKey(ownerUserID, conversarionID))
|
||||
//}
|
||||
// return batchGetCache(
|
||||
// ctx,
|
||||
// c.rcClient,
|
||||
// keys,
|
||||
// c.expireTime,
|
||||
// c.getConversationIndex,
|
||||
// func(ctx context.Context) ([]*relationtb.ConversationModel, error) {
|
||||
// return c.conversationDB.FindUserIDAllConversations(ctx, ownerUserID)
|
||||
// },
|
||||
//)
|
||||
return c.GetConversations(ctx, ownerUserID, conversationIDs)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetUserRecvMsgOpt(ctx context.Context, ownerUserID, conversationID string) (opt int, err error) {
|
||||
return getCache(ctx, c.rcClient, c.getRecvMsgOptKey(ownerUserID, conversationID), c.expireTime, func(ctx context.Context) (opt int, err error) {
|
||||
return c.conversationDB.GetUserRecvMsgOpt(ctx, ownerUserID, conversationID)
|
||||
})
|
||||
}
|
||||
|
||||
// func (c *ConversationRedisCache) GetSuperGroupRecvMsgNotNotifyUserIDs(ctx context.Context, groupID string) (userIDs []string, err error) {
|
||||
// return getCache(ctx, c.rcClient, c.getSuperGroupRecvNotNotifyUserIDsKey(groupID), c.expireTime, func(ctx context.Context) (userIDs []string, err error) {
|
||||
// return c.conversationDB.FindSuperGroupRecvMsgNotNotifyUserIDs(ctx, groupID)
|
||||
// })
|
||||
//}
|
||||
|
||||
func (c *ConversationRedisCache) DelUsersConversation(conversationID string, ownerUserIDs ...string) ConversationCache {
|
||||
keys := make([]string, 0, len(ownerUserIDs))
|
||||
for _, ownerUserID := range ownerUserIDs {
|
||||
keys = append(keys, c.getConversationKey(ownerUserID, conversationID))
|
||||
}
|
||||
cache := c.NewCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelUserRecvMsgOpt(ownerUserID, conversationID string) ConversationCache {
|
||||
cache := c.NewCache()
|
||||
cache.AddKeys(c.getRecvMsgOptKey(ownerUserID, conversationID))
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDs(groupID string) ConversationCache {
|
||||
cache := c.NewCache()
|
||||
cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsKey(groupID))
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
// func (c *ConversationRedisCache) GetSuperGroupRecvMsgNotNotifyUserIDsHash(ctx context.Context, groupID string) (hash uint64, err error) {
|
||||
// return getCache(ctx, c.rcClient, c.getSuperGroupRecvNotNotifyUserIDsHashKey(groupID), c.expireTime, func(ctx context.Context) (hash uint64, err error) {
|
||||
// userIDs, err := c.GetSuperGroupRecvMsgNotNotifyUserIDs(ctx, groupID)
|
||||
// if err != nil {
|
||||
// return 0, err
|
||||
// }
|
||||
// utils.Sort(userIDs, true)
|
||||
// bi := big.NewInt(0)
|
||||
// bi.SetString(utils.Md5(strings.Join(userIDs, ";"))[0:8], 16)
|
||||
// return bi.Uint64(), nil
|
||||
// },
|
||||
// )
|
||||
//}
|
||||
|
||||
func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID string) ConversationCache {
|
||||
cache := c.NewCache()
|
||||
cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsHashKey(groupID))
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelUserAllHasReadSeqs(ownerUserID string, conversationIDs ...string) ConversationCache {
|
||||
cache := c.NewCache()
|
||||
for _, conversationID := range conversationIDs {
|
||||
cache.AddKeys(c.getConversationHasReadSeqKey(ownerUserID, conversationID))
|
||||
}
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*relationtb.ConversationModel, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelConversationByConversationID(conversationIDs ...string) ConversationCache {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error) {
|
||||
return getCache(ctx, c.rcClient, c.getConversationNotReceiveMessageUserIDsKey(conversationID), c.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
return c.conversationDB.GetConversationNotReceiveMessageUserIDs(ctx, conversationID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) ConversationCache {
|
||||
cache := c.NewCache()
|
||||
for _, conversationID := range conversationIDs {
|
||||
cache.AddKeys(c.getConversationNotReceiveMessageUserIDsKey(conversationID))
|
||||
}
|
||||
|
||||
return cache
|
||||
}
|
||||
284
pkg/common/db/cache/meta_cache.go
vendored
284
pkg/common/db/cache/meta_cache.go
vendored
@ -1,284 +0,0 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/dtm-labs/rockscache"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/mw/specialerror"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
const (
|
||||
scanCount = 3000
|
||||
maxRetryTimes = 5
|
||||
retryInterval = time.Millisecond * 100
|
||||
)
|
||||
|
||||
var errIndex = errs.New("err index")
|
||||
|
||||
type metaCache interface {
|
||||
ExecDel(ctx context.Context, distinct ...bool) error
|
||||
// delete key rapid
|
||||
DelKey(ctx context.Context, key string) error
|
||||
AddKeys(keys ...string)
|
||||
ClearKeys()
|
||||
GetPreDelKeys() []string
|
||||
SetTopic(topic string)
|
||||
SetRawRedisClient(cli redis.UniversalClient)
|
||||
Copy() metaCache
|
||||
}
|
||||
|
||||
func NewMetaCacheRedis(rcClient *rockscache.Client, keys ...string) metaCache {
|
||||
return &metaCacheRedis{rcClient: rcClient, keys: keys, maxRetryTimes: maxRetryTimes, retryInterval: retryInterval}
|
||||
}
|
||||
|
||||
type metaCacheRedis struct {
|
||||
topic string
|
||||
rcClient *rockscache.Client
|
||||
keys []string
|
||||
maxRetryTimes int
|
||||
retryInterval time.Duration
|
||||
redisClient redis.UniversalClient
|
||||
}
|
||||
|
||||
func (m *metaCacheRedis) Copy() metaCache {
|
||||
var keys []string
|
||||
if len(m.keys) > 0 {
|
||||
keys = make([]string, 0, len(m.keys)*2)
|
||||
keys = append(keys, m.keys...)
|
||||
}
|
||||
return &metaCacheRedis{
|
||||
topic: m.topic,
|
||||
rcClient: m.rcClient,
|
||||
keys: keys,
|
||||
maxRetryTimes: m.maxRetryTimes,
|
||||
retryInterval: m.retryInterval,
|
||||
redisClient: m.redisClient,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *metaCacheRedis) SetTopic(topic string) {
|
||||
m.topic = topic
|
||||
}
|
||||
|
||||
func (m *metaCacheRedis) SetRawRedisClient(cli redis.UniversalClient) {
|
||||
m.redisClient = cli
|
||||
}
|
||||
|
||||
func (m *metaCacheRedis) ExecDel(ctx context.Context, distinct ...bool) error {
|
||||
if len(distinct) > 0 && distinct[0] {
|
||||
m.keys = datautil.Distinct(m.keys)
|
||||
}
|
||||
if len(m.keys) > 0 {
|
||||
log.ZDebug(ctx, "delete cache", "topic", m.topic, "keys", m.keys)
|
||||
for _, key := range m.keys {
|
||||
for i := 0; i < m.maxRetryTimes; i++ {
|
||||
if err := m.rcClient.TagAsDeleted(key); err != nil {
|
||||
log.ZError(ctx, "delete cache failed", err, "key", key)
|
||||
time.Sleep(m.retryInterval)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if pk := getPublishKey(m.topic, m.keys); len(pk) > 0 {
|
||||
data, err := json.Marshal(pk)
|
||||
if err != nil {
|
||||
log.ZError(ctx, "keys json marshal failed", err, "topic", m.topic, "keys", pk)
|
||||
} else {
|
||||
if err := m.redisClient.Publish(ctx, m.topic, string(data)).Err(); err != nil {
|
||||
log.ZError(ctx, "redis publish cache delete error", err, "topic", m.topic, "keys", pk)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *metaCacheRedis) DelKey(ctx context.Context, key string) error {
|
||||
return m.rcClient.TagAsDeleted2(ctx, key)
|
||||
}
|
||||
|
||||
func (m *metaCacheRedis) AddKeys(keys ...string) {
|
||||
m.keys = append(m.keys, keys...)
|
||||
}
|
||||
|
||||
func (m *metaCacheRedis) ClearKeys() {
|
||||
m.keys = []string{}
|
||||
}
|
||||
|
||||
func (m *metaCacheRedis) GetPreDelKeys() []string {
|
||||
return m.keys
|
||||
}
|
||||
|
||||
func GetDefaultOpt() rockscache.Options {
|
||||
opts := rockscache.NewDefaultOptions()
|
||||
opts.StrongConsistency = true
|
||||
opts.RandomExpireAdjustment = 0.2
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key string, expire time.Duration, fn func(ctx context.Context) (T, error)) (T, error) {
var t T
var write bool
v, err := rcClient.Fetch2(ctx, key, expire, func() (s string, err error) {
t, err = fn(ctx)
if err != nil {
return "", err
}
bs, err := json.Marshal(t)
if err != nil {
return "", errs.WrapMsg(err, "marshal failed")
}
write = true

return string(bs), nil
})
if err != nil {
return t, errs.Wrap(err)
}
if write {
return t, nil
}
if v == "" {
return t, errs.ErrRecordNotFound.WrapMsg("cache is not found")
}
err = json.Unmarshal([]byte(v), &t)
if err != nil {
errInfo := fmt.Sprintf("cache json.Unmarshal failed, key:%s, value:%s, expire:%s", key, v, expire)
return t, errs.WrapMsg(err, errInfo)
}

return t, nil
}

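getCache is the read-through primitive the deleted cache layer was built on: rockscache's Fetch2 serves the cached JSON when present and otherwise runs fn, and the marshal/unmarshal round trip plus the not-found sentinel live here. A self-contained sketch of the same contract using an in-memory map instead of rockscache:

package main

import (
	"context"
	"encoding/json"
	"fmt"
)

// fetchThrough mirrors getCache's contract with a plain map instead of
// rockscache: return the cached JSON if present, otherwise call fn, store the
// marshalled result, and hand the typed value back.
func fetchThrough[T any](ctx context.Context, store map[string]string, key string,
	fn func(ctx context.Context) (T, error)) (T, error) {
	var t T
	if raw, ok := store[key]; ok {
		err := json.Unmarshal([]byte(raw), &t)
		return t, err
	}
	t, err := fn(ctx)
	if err != nil {
		return t, err
	}
	bs, err := json.Marshal(t)
	if err != nil {
		return t, err
	}
	store[key] = string(bs)
	return t, nil
}

func main() {
	store := map[string]string{}
	v, _ := fetchThrough(context.Background(), store, "CONVERSATION:u1:c1", func(ctx context.Context) (int, error) {
		return 42, nil // pretend this is the MongoDB read
	})
	fmt.Println(v, store)
}
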
// func batchGetCache[T any](ctx context.Context, rcClient *rockscache.Client, keys []string, expire time.Duration, keyIndexFn func(t T, keys []string) (int, error), fn func(ctx context.Context) ([]T,
|
||||
// error)) ([]T, error) {
|
||||
// batchMap, err := rcClient.FetchBatch2(ctx, keys, expire, func(idxs []int) (m map[int]string, err error) {
|
||||
// values := make(map[int]string)
|
||||
// tArrays, err := fn(ctx)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// for _, v := range tArrays {
|
||||
// index, err := keyIndexFn(v, keys)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// bs, err := json.Marshal(v)
|
||||
// if err != nil {
|
||||
// return nil, utils.Wrap(err, "marshal failed")
|
||||
// }
|
||||
// values[index] = string(bs)
|
||||
// }
|
||||
// return values, nil
|
||||
// })
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// var tArrays []T
|
||||
// for _, v := range batchMap {
|
||||
// if v != "" {
|
||||
// var t T
|
||||
// err = json.Unmarshal([]byte(v), &t)
|
||||
// if err != nil {
|
||||
// return nil, utils.Wrap(err, "unmarshal failed")
|
||||
// }
|
||||
// tArrays = append(tArrays, t)
|
||||
// }
|
||||
// }
|
||||
// return tArrays, nil
|
||||
//}
|
||||
|
||||
func batchGetCache2[T any, K comparable](
ctx context.Context,
rcClient *rockscache.Client,
expire time.Duration,
keys []K,
keyFn func(key K) string,
fns func(ctx context.Context, key K) (T, error),
) ([]T, error) {
if len(keys) == 0 {
return nil, nil
}
res := make([]T, 0, len(keys))
for _, key := range keys {
val, err := getCache(ctx, rcClient, keyFn(key), expire, func(ctx context.Context) (T, error) {
return fns(ctx, key)
})
if err != nil {
if errs.ErrRecordNotFound.Is(specialerror.ErrCode(errs.Unwrap(err))) {
continue
}
return nil, errs.Wrap(err)
}
res = append(res, val)
}

return res, nil
}

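batchGetCache2 fans each key through getCache and silently skips records that come back as not found, so one missing conversation does not fail the whole batch. A self-contained sketch of that skip-on-not-found fan-out:

package main

import (
	"context"
	"errors"
	"fmt"
)

var errNotFound = errors.New("record not found")

// batchFetch mirrors batchGetCache2's behaviour: fetch each key independently
// and skip (rather than fail on) missing records.
func batchFetch[K comparable, T any](ctx context.Context, keys []K,
	fetch func(ctx context.Context, key K) (T, error)) ([]T, error) {
	res := make([]T, 0, len(keys))
	for _, k := range keys {
		v, err := fetch(ctx, k)
		if err != nil {
			if errors.Is(err, errNotFound) {
				continue
			}
			return nil, err
		}
		res = append(res, v)
	}
	return res, nil
}

func main() {
	vals, _ := batchFetch(context.Background(), []string{"c1", "missing", "c2"},
		func(ctx context.Context, id string) (string, error) {
			if id == "missing" {
				return "", errNotFound
			}
			return "conv-" + id, nil
		})
	fmt.Println(vals) // [conv-c1 conv-c2]
}
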
// func batchGetCacheMap[T any](
|
||||
// ctx context.Context,
|
||||
// rcClient *rockscache.Client,
|
||||
// keys, originKeys []string,
|
||||
// expire time.Duration,
|
||||
// keyIndexFn func(s string, keys []string) (int, error),
|
||||
// fn func(ctx context.Context) (map[string]T, error),
|
||||
// ) (map[string]T, error) {
|
||||
// batchMap, err := rcClient.FetchBatch2(ctx, keys, expire, func(idxs []int) (m map[int]string, err error) {
|
||||
// tArrays, err := fn(ctx)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// values := make(map[int]string)
|
||||
// for k, v := range tArrays {
|
||||
// index, err := keyIndexFn(k, originKeys)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// bs, err := json.Marshal(v)
|
||||
// if err != nil {
|
||||
// return nil, utils.Wrap(err, "marshal failed")
|
||||
// }
|
||||
// values[index] = string(bs)
|
||||
// }
|
||||
// return values, nil
|
||||
// })
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// tMap := make(map[string]T)
|
||||
// for i, v := range batchMap {
|
||||
// if v != "" {
|
||||
// var t T
|
||||
// err = json.Unmarshal([]byte(v), &t)
|
||||
// if err != nil {
|
||||
// return nil, utils.Wrap(err, "unmarshal failed")
|
||||
// }
|
||||
// tMap[originKeys[i]] = t
|
||||
// }
|
||||
// }
|
||||
// return tMap, nil
|
||||
//}
|
||||
553
pkg/common/db/cache/msg.go
vendored
553
pkg/common/db/cache/msg.go
vendored
@ -1,553 +0,0 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/jsonpb"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
|
||||
"github.com/openimsdk/protocol/constant"
|
||||
"github.com/openimsdk/protocol/sdkws"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/utils/stringutil"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const msgCacheTimeout = 86400 * time.Second
|
||||
|
||||
const (
|
||||
maxSeq = "MAX_SEQ:"
|
||||
minSeq = "MIN_SEQ:"
|
||||
conversationUserMinSeq = "CON_USER_MIN_SEQ:"
|
||||
hasReadSeq = "HAS_READ_SEQ:"
|
||||
|
||||
getuiToken = "GETUI_TOKEN"
|
||||
getuiTaskID = "GETUI_TASK_ID"
|
||||
FCM_TOKEN = "FCM_TOKEN:"
|
||||
|
||||
messageCache = "MESSAGE_CACHE:"
|
||||
messageDelUserList = "MESSAGE_DEL_USER_LIST:"
|
||||
userDelMessagesList = "USER_DEL_MESSAGES_LIST:"
|
||||
sendMsgFailedFlag = "SEND_MSG_FAILED_FLAG:"
|
||||
userBadgeUnreadCountSum = "USER_BADGE_UNREAD_COUNT_SUM:"
|
||||
exTypeKeyLocker = "EX_LOCK:"
|
||||
)
|
||||
|
||||
var concurrentLimit = 3
|
||||
|
||||
//type MsgModel interface {
|
||||
// SeqCache
|
||||
// ThirdCache
|
||||
// MsgCache
|
||||
//}
|
||||
|
||||
type MsgCache interface {
|
||||
GetMessagesBySeq(ctx context.Context, conversationID string, seqs []int64) (seqMsg []*sdkws.MsgData, failedSeqList []int64, err error)
|
||||
SetMessageToCache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (int, error)
|
||||
UserDeleteMsgs(ctx context.Context, conversationID string, seqs []int64, userID string) error
|
||||
DelUserDeleteMsgsList(ctx context.Context, conversationID string, seqs []int64)
|
||||
DeleteMessages(ctx context.Context, conversationID string, seqs []int64) error
|
||||
GetUserDelList(ctx context.Context, userID, conversationID string) (seqs []int64, err error)
|
||||
CleanUpOneConversationAllMsg(ctx context.Context, conversationID string) error
|
||||
DelMsgFromCache(ctx context.Context, userID string, seqList []int64) error
|
||||
SetSendMsgStatus(ctx context.Context, id string, status int32) error
|
||||
GetSendMsgStatus(ctx context.Context, id string) (int32, error)
|
||||
JudgeMessageReactionExist(ctx context.Context, clientMsgID string, sessionType int32) (bool, error)
|
||||
GetOneMessageAllReactionList(ctx context.Context, clientMsgID string, sessionType int32) (map[string]string, error)
|
||||
DeleteOneMessageKey(ctx context.Context, clientMsgID string, sessionType int32, subKey string) error
|
||||
SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error)
|
||||
GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error)
|
||||
SetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey, value string) error
|
||||
LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error
|
||||
UnLockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error
|
||||
}
|
||||
|
||||
//func NewMsgCacheModel(client redis.UniversalClient, msgCacheTimeout int, redisConf *config.Redis) MsgModel {
|
||||
// return &msgCache{rdb: client, msgCacheTimeout: msgCacheTimeout, redisConf: redisConf}
|
||||
//}
|
||||
|
||||
func NewMsgCache(client redis.UniversalClient, redisEnablePipeline bool) MsgCache {
|
||||
return &msgCache{rdb: client, msgCacheTimeout: msgCacheTimeout, redisEnablePipeline: redisEnablePipeline}
|
||||
}
|
||||
|
||||
type msgCache struct {
|
||||
metaCache
|
||||
rdb redis.UniversalClient
|
||||
msgCacheTimeout time.Duration
|
||||
redisEnablePipeline bool
|
||||
}
|
||||
|
||||
func (c *msgCache) allMessageCacheKey(conversationID string) string {
|
||||
return messageCache + conversationID + "_*"
|
||||
}
|
||||
|
||||
func (c *msgCache) getMessageCacheKey(conversationID string, seq int64) string {
|
||||
return messageCache + conversationID + "_" + strconv.Itoa(int(seq))
|
||||
}
|
||||
|
||||
func (c *msgCache) SetMessageToCache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (int, error) {
|
||||
if c.redisEnablePipeline {
|
||||
return c.PipeSetMessageToCache(ctx, conversationID, msgs)
|
||||
}
|
||||
return c.ParallelSetMessageToCache(ctx, conversationID, msgs)
|
||||
}
|
||||
|
||||
func (c *msgCache) PipeSetMessageToCache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (int, error) {
|
||||
pipe := c.rdb.Pipeline()
|
||||
for _, msg := range msgs {
|
||||
s, err := msgprocessor.Pb2String(msg)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
key := c.getMessageCacheKey(conversationID, msg.Seq)
|
||||
_ = pipe.Set(ctx, key, s, c.msgCacheTimeout)
|
||||
}
|
||||
|
||||
results, err := pipe.Exec(ctx)
|
||||
if err != nil {
|
||||
return 0, errs.Wrap(err)
|
||||
}
|
||||
|
||||
for _, res := range results {
|
||||
if res.Err() != nil {
|
||||
return 0, errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
|
||||
return len(msgs), nil
|
||||
}
|
||||
|
||||
func (c *msgCache) ParallelSetMessageToCache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (int, error) {
|
||||
wg := errgroup.Group{}
|
||||
wg.SetLimit(concurrentLimit)
|
||||
|
||||
for _, msg := range msgs {
|
||||
msg := msg // closure safe var
|
||||
wg.Go(func() error {
|
||||
s, err := msgprocessor.Pb2String(msg)
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
|
||||
key := c.getMessageCacheKey(conversationID, msg.Seq)
|
||||
if err := c.rdb.Set(ctx, key, s, c.msgCacheTimeout).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
err := wg.Wait()
|
||||
if err != nil {
|
||||
return 0, errs.WrapMsg(err, "wg.Wait failed")
|
||||
}
|
||||
|
||||
return len(msgs), nil
|
||||
}
|
||||
|
||||
func (c *msgCache) getMessageDelUserListKey(conversationID string, seq int64) string {
|
||||
return messageDelUserList + conversationID + ":" + strconv.Itoa(int(seq))
|
||||
}
|
||||
|
||||
func (c *msgCache) getUserDelList(conversationID, userID string) string {
|
||||
return userDelMessagesList + conversationID + ":" + userID
|
||||
}
|
||||
|
||||
func (c *msgCache) UserDeleteMsgs(ctx context.Context, conversationID string, seqs []int64, userID string) error {
|
||||
for _, seq := range seqs {
|
||||
delUserListKey := c.getMessageDelUserListKey(conversationID, seq)
|
||||
userDelListKey := c.getUserDelList(conversationID, userID)
|
||||
err := c.rdb.SAdd(ctx, delUserListKey, userID).Err()
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
err = c.rdb.SAdd(ctx, userDelListKey, seq).Err()
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
if err := c.rdb.Expire(ctx, delUserListKey, c.msgCacheTimeout).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
if err := c.rdb.Expire(ctx, userDelListKey, c.msgCacheTimeout).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
// pipe := c.rdb.Pipeline()
|
||||
// for _, seq := range seqs {
|
||||
// delUserListKey := c.getMessageDelUserListKey(conversationID, seq)
|
||||
// userDelListKey := c.getUserDelList(conversationID, userID)
|
||||
// err := pipe.SAdd(ctx, delUserListKey, userID).Err()
|
||||
// if err != nil {
|
||||
// return errs.Wrap(err)
|
||||
// }
|
||||
// err = pipe.SAdd(ctx, userDelListKey, seq).Err()
|
||||
// if err != nil {
|
||||
// return errs.Wrap(err)
|
||||
// }
|
||||
// if err := pipe.Expire(ctx, delUserListKey, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil {
|
||||
// return errs.Wrap(err)
|
||||
// }
|
||||
// if err := pipe.Expire(ctx, userDelListKey, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil {
|
||||
// return errs.Wrap(err)
|
||||
// }
|
||||
//}
|
||||
// _, err := pipe.Exec(ctx)
|
||||
// return errs.Wrap(err)
|
||||
}
|
||||
|
||||
func (c *msgCache) GetUserDelList(ctx context.Context, userID, conversationID string) (seqs []int64, err error) {
|
||||
result, err := c.rdb.SMembers(ctx, c.getUserDelList(conversationID, userID)).Result()
|
||||
if err != nil {
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
seqs = make([]int64, len(result))
|
||||
for i, v := range result {
|
||||
seqs[i] = stringutil.StringToInt64(v)
|
||||
}
|
||||
|
||||
return seqs, nil
|
||||
}
|
||||
|
||||
func (c *msgCache) DelUserDeleteMsgsList(ctx context.Context, conversationID string, seqs []int64) {
|
||||
for _, seq := range seqs {
|
||||
delUsers, err := c.rdb.SMembers(ctx, c.getMessageDelUserListKey(conversationID, seq)).Result()
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq)
|
||||
|
||||
continue
|
||||
}
|
||||
if len(delUsers) > 0 {
|
||||
var failedFlag bool
|
||||
for _, userID := range delUsers {
|
||||
err = c.rdb.SRem(ctx, c.getUserDelList(conversationID, userID), seq).Err()
|
||||
if err != nil {
|
||||
failedFlag = true
|
||||
log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq, "userID", userID)
|
||||
}
|
||||
}
|
||||
if !failedFlag {
|
||||
if err := c.rdb.Del(ctx, c.getMessageDelUserListKey(conversationID, seq)).Err(); err != nil {
|
||||
log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// for _, seq := range seqs {
|
||||
// delUsers, err := c.rdb.SMembers(ctx, c.getMessageDelUserListKey(conversationID, seq)).Result()
|
||||
// if err != nil {
|
||||
// log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq)
|
||||
// continue
|
||||
// }
|
||||
// if len(delUsers) > 0 {
|
||||
// pipe := c.rdb.Pipeline()
|
||||
// var failedFlag bool
|
||||
// for _, userID := range delUsers {
|
||||
// err = pipe.SRem(ctx, c.getUserDelList(conversationID, userID), seq).Err()
|
||||
// if err != nil {
|
||||
// failedFlag = true
|
||||
// log.ZWarn(
|
||||
// ctx,
|
||||
// "DelUserDeleteMsgsList failed",
|
||||
// err,
|
||||
// "conversationID",
|
||||
// conversationID,
|
||||
// "seq",
|
||||
// seq,
|
||||
// "userID",
|
||||
// userID,
|
||||
// )
|
||||
// }
|
||||
// }
|
||||
// if !failedFlag {
|
||||
// if err := pipe.Del(ctx, c.getMessageDelUserListKey(conversationID, seq)).Err(); err != nil {
|
||||
// log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq)
|
||||
// }
|
||||
// }
|
||||
// if _, err := pipe.Exec(ctx); err != nil {
|
||||
// log.ZError(ctx, "pipe exec failed", err, "conversationID", conversationID, "seq", seq)
|
||||
// }
|
||||
// }
|
||||
//}
|
||||
}
|
||||
|
||||
func (c *msgCache) DeleteMessages(ctx context.Context, conversationID string, seqs []int64) error {
|
||||
if c.redisEnablePipeline {
|
||||
return c.PipeDeleteMessages(ctx, conversationID, seqs)
|
||||
}
|
||||
|
||||
return c.ParallelDeleteMessages(ctx, conversationID, seqs)
|
||||
}
|
||||
|
||||
func (c *msgCache) ParallelDeleteMessages(ctx context.Context, conversationID string, seqs []int64) error {
|
||||
wg := errgroup.Group{}
|
||||
wg.SetLimit(concurrentLimit)
|
||||
|
||||
for _, seq := range seqs {
|
||||
seq := seq
|
||||
wg.Go(func() error {
|
||||
err := c.rdb.Del(ctx, c.getMessageCacheKey(conversationID, seq)).Err()
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
return wg.Wait()
|
||||
}
|
||||
|
||||
func (c *msgCache) PipeDeleteMessages(ctx context.Context, conversationID string, seqs []int64) error {
|
||||
pipe := c.rdb.Pipeline()
|
||||
for _, seq := range seqs {
|
||||
_ = pipe.Del(ctx, c.getMessageCacheKey(conversationID, seq))
|
||||
}
|
||||
|
||||
results, err := pipe.Exec(ctx)
|
||||
if err != nil {
|
||||
return errs.WrapMsg(err, "pipe.del")
|
||||
}
|
||||
|
||||
for _, res := range results {
|
||||
if res.Err() != nil {
|
||||
return errs.Wrap(res.Err()) // wrap the per-command error; err is nil at this point
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *msgCache) CleanUpOneConversationAllMsg(ctx context.Context, conversationID string) error {
|
||||
vals, err := c.rdb.Keys(ctx, c.allMessageCacheKey(conversationID)).Result()
|
||||
if errors.Is(err, redis.Nil) {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
for _, v := range vals {
|
||||
if err := c.rdb.Del(ctx, v).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *msgCache) DelMsgFromCache(ctx context.Context, userID string, seqs []int64) error {
|
||||
for _, seq := range seqs {
|
||||
key := c.getMessageCacheKey(userID, seq)
|
||||
result, err := c.rdb.Get(ctx, key).Result()
|
||||
if err != nil {
|
||||
if errors.Is(err, redis.Nil) {
|
||||
continue
|
||||
}
|
||||
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
var msg sdkws.MsgData
|
||||
err = jsonpb.UnmarshalString(result, &msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msg.Status = constant.MsgDeleted
|
||||
s, err := msgprocessor.Pb2String(&msg)
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
if err := c.rdb.Set(ctx, key, s, c.msgCacheTimeout).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *msgCache) SetSendMsgStatus(ctx context.Context, id string, status int32) error {
|
||||
return errs.Wrap(c.rdb.Set(ctx, sendMsgFailedFlag+id, status, time.Hour*24).Err())
|
||||
}
|
||||
|
||||
func (c *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, error) {
|
||||
result, err := c.rdb.Get(ctx, sendMsgFailedFlag+id).Int()
|
||||
|
||||
return int32(result), errs.Wrap(err)
|
||||
}
|
||||
|
||||
func (c *msgCache) LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error {
|
||||
key := exTypeKeyLocker + clientMsgID + "_" + TypeKey
|
||||
|
||||
return errs.Wrap(c.rdb.SetNX(ctx, key, 1, time.Minute).Err())
|
||||
}
|
||||
|
||||
func (c *msgCache) UnLockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error {
|
||||
key := exTypeKeyLocker + clientMsgID + "_" + TypeKey
|
||||
|
||||
return errs.Wrap(c.rdb.Del(ctx, key).Err())
|
||||
}
|
||||
|
||||
func (c *msgCache) getMessageReactionExPrefix(clientMsgID string, sessionType int32) string {
|
||||
switch sessionType {
|
||||
case constant.SingleChatType:
|
||||
return "EX_SINGLE_" + clientMsgID
|
||||
case constant.WriteGroupChatType:
|
||||
return "EX_GROUP_" + clientMsgID
|
||||
case constant.ReadGroupChatType:
|
||||
return "EX_SUPER_GROUP_" + clientMsgID
|
||||
case constant.NotificationChatType:
|
||||
return "EX_NOTIFICATION" + clientMsgID
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (c *msgCache) JudgeMessageReactionExist(ctx context.Context, clientMsgID string, sessionType int32) (bool, error) {
|
||||
n, err := c.rdb.Exists(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType)).Result()
|
||||
if err != nil {
|
||||
return false, errs.Wrap(err)
|
||||
}
|
||||
|
||||
return n > 0, nil
|
||||
}
|
||||
|
||||
func (c *msgCache) SetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey, value string) error {
|
||||
return errs.Wrap(c.rdb.HSet(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), typeKey, value).Err())
|
||||
}
|
||||
|
||||
func (c *msgCache) SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error) {
|
||||
val, err := c.rdb.Expire(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), expiration).Result()
|
||||
return val, errs.Wrap(err)
|
||||
}
|
||||
|
||||
func (c *msgCache) GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error) {
|
||||
val, err := c.rdb.HGet(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), typeKey).Result()
|
||||
return val, errs.Wrap(err)
|
||||
}
|
||||
|
||||
func (c *msgCache) GetOneMessageAllReactionList(ctx context.Context, clientMsgID string, sessionType int32) (map[string]string, error) {
|
||||
val, err := c.rdb.HGetAll(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType)).Result()
|
||||
return val, errs.Wrap(err)
|
||||
}
|
||||
|
||||
func (c *msgCache) DeleteOneMessageKey(ctx context.Context, clientMsgID string, sessionType int32, subKey string) error {
|
||||
return errs.Wrap(c.rdb.HDel(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), subKey).Err())
|
||||
}
|
||||
|
||||
func (c *msgCache) GetMessagesBySeq(ctx context.Context, conversationID string, seqs []int64) (seqMsgs []*sdkws.MsgData, failedSeqs []int64, err error) {
|
||||
if c.redisEnablePipeline {
|
||||
return c.PipeGetMessagesBySeq(ctx, conversationID, seqs)
|
||||
}
|
||||
|
||||
return c.ParallelGetMessagesBySeq(ctx, conversationID, seqs)
|
||||
}
|
||||
|
||||
func (c *msgCache) PipeGetMessagesBySeq(ctx context.Context, conversationID string, seqs []int64) (seqMsgs []*sdkws.MsgData, failedSeqs []int64, err error) {
|
||||
pipe := c.rdb.Pipeline()
|
||||
|
||||
results := []*redis.StringCmd{}
|
||||
for _, seq := range seqs {
|
||||
results = append(results, pipe.Get(ctx, c.getMessageCacheKey(conversationID, seq)))
|
||||
}
|
||||
|
||||
_, err = pipe.Exec(ctx)
|
||||
if err != nil && err != redis.Nil {
|
||||
return seqMsgs, failedSeqs, errs.WrapMsg(err, "pipe.get")
|
||||
}
|
||||
|
||||
for idx, res := range results {
|
||||
seq := seqs[idx]
|
||||
if res.Err() != nil {
|
||||
log.ZError(ctx, "GetMessagesBySeq failed", err, "conversationID", conversationID, "seq", seq, "err", res.Err())
|
||||
failedSeqs = append(failedSeqs, seq)
|
||||
continue
|
||||
}
|
||||
|
||||
msg := sdkws.MsgData{}
|
||||
if err = msgprocessor.String2Pb(res.Val(), &msg); err != nil {
|
||||
log.ZError(ctx, "GetMessagesBySeq Unmarshal failed", err, "res", res, "conversationID", conversationID, "seq", seq)
|
||||
failedSeqs = append(failedSeqs, seq)
|
||||
continue
|
||||
}
|
||||
|
||||
if msg.Status == constant.MsgDeleted {
|
||||
failedSeqs = append(failedSeqs, seq)
|
||||
continue
|
||||
}
|
||||
|
||||
seqMsgs = append(seqMsgs, &msg)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *msgCache) ParallelGetMessagesBySeq(ctx context.Context, conversationID string, seqs []int64) (seqMsgs []*sdkws.MsgData, failedSeqs []int64, err error) {
|
||||
type entry struct {
|
||||
err error
|
||||
msg *sdkws.MsgData
|
||||
}
|
||||
|
||||
wg := errgroup.Group{}
|
||||
wg.SetLimit(concurrentLimit)
|
||||
|
||||
results := make([]entry, len(seqs)) // set slice len/cap to length of seqs.
|
||||
for idx, seq := range seqs {
|
||||
// closure safe var
|
||||
idx := idx
|
||||
seq := seq
|
||||
|
||||
wg.Go(func() error {
|
||||
res, err := c.rdb.Get(ctx, c.getMessageCacheKey(conversationID, seq)).Result()
|
||||
if err != nil {
|
||||
log.ZError(ctx, "GetMessagesBySeq failed", err, "conversationID", conversationID, "seq", seq)
|
||||
results[idx] = entry{err: err}
|
||||
return nil
|
||||
}
|
||||
|
||||
msg := sdkws.MsgData{}
|
||||
if err = msgprocessor.String2Pb(res, &msg); err != nil {
|
||||
log.ZError(ctx, "GetMessagesBySeq Unmarshal failed", err, "res", res, "conversationID", conversationID, "seq", seq)
|
||||
results[idx] = entry{err: err}
|
||||
return nil
|
||||
}
|
||||
|
||||
if msg.Status == constant.MsgDeleted {
|
||||
results[idx] = entry{err: errs.ErrRecordNotFound.WrapMsg("message deleted")} // record deleted messages as failed so their seqs land in failedSeqs
|
||||
return nil
|
||||
}
|
||||
|
||||
results[idx] = entry{msg: &msg}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
_ = wg.Wait()
|
||||
|
||||
for idx, res := range results {
|
||||
if res.err != nil {
|
||||
failedSeqs = append(failedSeqs, seqs[idx])
|
||||
continue
|
||||
}
|
||||
|
||||
seqMsgs = append(seqMsgs, res.msg)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
pkg/common/db/cache/msg_test.go (vendored)
@ -1,401 +0,0 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/openimsdk/protocol/sdkws"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParallelSetMessageToCache(t *testing.T) {
|
||||
var (
|
||||
cid = fmt.Sprintf("cid-%v", rand.Int63())
|
||||
seqFirst = rand.Int63()
|
||||
msgs = []*sdkws.MsgData{}
|
||||
)
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
msgs = append(msgs, &sdkws.MsgData{
|
||||
Seq: seqFirst + int64(i),
|
||||
})
|
||||
}
|
||||
|
||||
testParallelSetMessageToCache(t, cid, msgs)
|
||||
}
|
||||
|
||||
func testParallelSetMessageToCache(t *testing.T, cid string, msgs []*sdkws.MsgData) {
|
||||
rdb := redis.NewClient(&redis.Options{})
|
||||
defer rdb.Close()
|
||||
|
||||
cacher := msgCache{rdb: rdb}
|
||||
|
||||
ret, err := cacher.ParallelSetMessageToCache(context.Background(), cid, msgs)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(msgs), ret)
|
||||
|
||||
// validate
|
||||
for _, msg := range msgs {
|
||||
key := cacher.getMessageCacheKey(cid, msg.Seq)
|
||||
val, err := rdb.Exists(context.Background(), key).Result()
|
||||
assert.Nil(t, err)
|
||||
assert.EqualValues(t, 1, val)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPipeSetMessageToCache(t *testing.T) {
|
||||
var (
|
||||
cid = fmt.Sprintf("cid-%v", rand.Int63())
|
||||
seqFirst = rand.Int63()
|
||||
msgs = []*sdkws.MsgData{}
|
||||
)
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
msgs = append(msgs, &sdkws.MsgData{
|
||||
Seq: seqFirst + int64(i),
|
||||
})
|
||||
}
|
||||
|
||||
testPipeSetMessageToCache(t, cid, msgs)
|
||||
}
|
||||
|
||||
func testPipeSetMessageToCache(t *testing.T, cid string, msgs []*sdkws.MsgData) {
|
||||
rdb := redis.NewClient(&redis.Options{})
|
||||
defer rdb.Close()
|
||||
|
||||
cacher := msgCache{rdb: rdb}
|
||||
|
||||
ret, err := cacher.PipeSetMessageToCache(context.Background(), cid, msgs)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(msgs), ret)
|
||||
|
||||
// validate
|
||||
for _, msg := range msgs {
|
||||
key := cacher.getMessageCacheKey(cid, msg.Seq)
|
||||
val, err := rdb.Exists(context.Background(), key).Result()
|
||||
assert.Nil(t, err)
|
||||
assert.EqualValues(t, 1, val)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetMessagesBySeq(t *testing.T) {
|
||||
var (
|
||||
cid = fmt.Sprintf("cid-%v", rand.Int63())
|
||||
seqFirst = rand.Int63()
|
||||
msgs = []*sdkws.MsgData{}
|
||||
)
|
||||
|
||||
seqs := []int64{}
|
||||
for i := 0; i < 100; i++ {
|
||||
msgs = append(msgs, &sdkws.MsgData{
|
||||
Seq: seqFirst + int64(i),
|
||||
SendID: fmt.Sprintf("fake-sendid-%v", i),
|
||||
})
|
||||
seqs = append(seqs, seqFirst+int64(i))
|
||||
}
|
||||
|
||||
// set data to cache
|
||||
testPipeSetMessageToCache(t, cid, msgs)
|
||||
|
||||
// get data from cache with parallel mode
|
||||
testParallelGetMessagesBySeq(t, cid, seqs, msgs)
|
||||
|
||||
// get data from cache with pipeline mode
|
||||
testPipeGetMessagesBySeq(t, cid, seqs, msgs)
|
||||
}
|
||||
|
||||
func testParallelGetMessagesBySeq(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
|
||||
rdb := redis.NewClient(&redis.Options{})
|
||||
defer rdb.Close()
|
||||
|
||||
cacher := msgCache{rdb: rdb}
|
||||
|
||||
respMsgs, failedSeqs, err := cacher.ParallelGetMessagesBySeq(context.Background(), cid, seqs)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 0, len(failedSeqs))
|
||||
assert.Equal(t, len(respMsgs), len(seqs))
|
||||
|
||||
// validate
|
||||
for idx, msg := range respMsgs {
|
||||
assert.Equal(t, msg.Seq, inputMsgs[idx].Seq)
|
||||
assert.Equal(t, msg.SendID, inputMsgs[idx].SendID)
|
||||
}
|
||||
}
|
||||
|
||||
func testPipeGetMessagesBySeq(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
|
||||
rdb := redis.NewClient(&redis.Options{})
|
||||
defer rdb.Close()
|
||||
|
||||
cacher := msgCache{rdb: rdb}
|
||||
|
||||
respMsgs, failedSeqs, err := cacher.PipeGetMessagesBySeq(context.Background(), cid, seqs)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 0, len(failedSeqs))
|
||||
assert.Equal(t, len(respMsgs), len(seqs))
|
||||
|
||||
// validate
|
||||
for idx, msg := range respMsgs {
|
||||
assert.Equal(t, msg.Seq, inputMsgs[idx].Seq)
|
||||
assert.Equal(t, msg.SendID, inputMsgs[idx].SendID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetMessagesBySeqWithEmptySeqs(t *testing.T) {
|
||||
var (
|
||||
cid = fmt.Sprintf("cid-%v", rand.Int63())
|
||||
seqFirst int64 = 0
|
||||
msgs = []*sdkws.MsgData{}
|
||||
)
|
||||
|
||||
seqs := []int64{}
|
||||
for i := 0; i < 100; i++ {
|
||||
msgs = append(msgs, &sdkws.MsgData{
|
||||
Seq: seqFirst + int64(i),
|
||||
SendID: fmt.Sprintf("fake-sendid-%v", i),
|
||||
})
|
||||
seqs = append(seqs, seqFirst+int64(i))
|
||||
}
|
||||
|
||||
// don't set cache, only get data from cache.
|
||||
|
||||
// get data from cache with parallel mode
|
||||
testParallelGetMessagesBySeqWithEmptry(t, cid, seqs, msgs)
|
||||
|
||||
// get data from cache with pipeline mode
|
||||
testPipeGetMessagesBySeqWithEmptry(t, cid, seqs, msgs)
|
||||
}
|
||||
|
||||
func testParallelGetMessagesBySeqWithEmptry(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
|
||||
rdb := redis.NewClient(&redis.Options{})
|
||||
defer rdb.Close()
|
||||
|
||||
cacher := msgCache{rdb: rdb}
|
||||
|
||||
respMsgs, failedSeqs, err := cacher.ParallelGetMessagesBySeq(context.Background(), cid, seqs)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(seqs), len(failedSeqs))
|
||||
assert.Equal(t, 0, len(respMsgs))
|
||||
}
|
||||
|
||||
func testPipeGetMessagesBySeqWithEmptry(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
|
||||
rdb := redis.NewClient(&redis.Options{})
|
||||
defer rdb.Close()
|
||||
|
||||
cacher := msgCache{rdb: rdb}
|
||||
|
||||
respMsgs, failedSeqs, err := cacher.PipeGetMessagesBySeq(context.Background(), cid, seqs)
|
||||
assert.Equal(t, err, redis.Nil)
|
||||
assert.Equal(t, len(seqs), len(failedSeqs))
|
||||
assert.Equal(t, 0, len(respMsgs))
|
||||
}
|
||||
|
||||
func TestGetMessagesBySeqWithLostHalfSeqs(t *testing.T) {
|
||||
var (
|
||||
cid = fmt.Sprintf("cid-%v", rand.Int63())
|
||||
seqFirst int64 = 0
|
||||
msgs = []*sdkws.MsgData{}
|
||||
)
|
||||
|
||||
seqs := []int64{}
|
||||
for i := 0; i < 100; i++ {
|
||||
msgs = append(msgs, &sdkws.MsgData{
|
||||
Seq: seqFirst + int64(i),
|
||||
SendID: fmt.Sprintf("fake-sendid-%v", i),
|
||||
})
|
||||
seqs = append(seqs, seqFirst+int64(i))
|
||||
}
|
||||
|
||||
// Only set half the number of messages.
|
||||
testParallelSetMessageToCache(t, cid, msgs[:50])
|
||||
|
||||
// get data from cache with parallel mode
|
||||
testParallelGetMessagesBySeqWithLostHalfSeqs(t, cid, seqs, msgs)
|
||||
|
||||
// get data from cache with pipeline mode
|
||||
testPipeGetMessagesBySeqWithLostHalfSeqs(t, cid, seqs, msgs)
|
||||
}
|
||||
|
||||
func testParallelGetMessagesBySeqWithLostHalfSeqs(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
|
||||
rdb := redis.NewClient(&redis.Options{})
|
||||
defer rdb.Close()
|
||||
|
||||
cacher := msgCache{rdb: rdb}
|
||||
|
||||
respMsgs, failedSeqs, err := cacher.ParallelGetMessagesBySeq(context.Background(), cid, seqs)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(seqs)/2, len(failedSeqs))
|
||||
assert.Equal(t, len(seqs)/2, len(respMsgs))
|
||||
|
||||
for idx, msg := range respMsgs {
|
||||
assert.Equal(t, msg.Seq, seqs[idx])
|
||||
}
|
||||
}
|
||||
|
||||
func testPipeGetMessagesBySeqWithLostHalfSeqs(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
|
||||
rdb := redis.NewClient(&redis.Options{})
|
||||
defer rdb.Close()
|
||||
|
||||
cacher := msgCache{rdb: rdb}
|
||||
|
||||
respMsgs, failedSeqs, err := cacher.PipeGetMessagesBySeq(context.Background(), cid, seqs)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(seqs)/2, len(failedSeqs))
|
||||
assert.Equal(t, len(seqs)/2, len(respMsgs))
|
||||
|
||||
for idx, msg := range respMsgs {
|
||||
assert.Equal(t, msg.Seq, seqs[idx])
|
||||
}
|
||||
}
|
||||
|
||||
func TestPipeDeleteMessages(t *testing.T) {
|
||||
var (
|
||||
cid = fmt.Sprintf("cid-%v", rand.Int63())
|
||||
seqFirst = rand.Int63()
|
||||
msgs = []*sdkws.MsgData{}
|
||||
)
|
||||
|
||||
var seqs []int64
|
||||
for i := 0; i < 100; i++ {
|
||||
msgs = append(msgs, &sdkws.MsgData{
|
||||
Seq: seqFirst + int64(i),
|
||||
})
|
||||
seqs = append(seqs, msgs[i].Seq)
|
||||
}
|
||||
|
||||
testPipeSetMessageToCache(t, cid, msgs)
|
||||
testPipeDeleteMessagesOK(t, cid, seqs, msgs)
|
||||
|
||||
// set again
|
||||
testPipeSetMessageToCache(t, cid, msgs)
|
||||
testPipeDeleteMessagesMix(t, cid, seqs[:90], msgs)
|
||||
}
|
||||
|
||||
func testPipeDeleteMessagesOK(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
|
||||
rdb := redis.NewClient(&redis.Options{})
|
||||
defer rdb.Close()
|
||||
|
||||
cacher := msgCache{rdb: rdb}
|
||||
|
||||
err := cacher.PipeDeleteMessages(context.Background(), cid, seqs)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// validate
|
||||
for _, msg := range inputMsgs {
|
||||
key := cacher.getMessageCacheKey(cid, msg.Seq)
|
||||
val := rdb.Exists(context.Background(), key).Val()
|
||||
assert.EqualValues(t, 0, val)
|
||||
}
|
||||
}
|
||||
|
||||
func testPipeDeleteMessagesMix(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
|
||||
rdb := redis.NewClient(&redis.Options{})
|
||||
defer rdb.Close()
|
||||
|
||||
cacher := msgCache{rdb: rdb}
|
||||
|
||||
err := cacher.PipeDeleteMessages(context.Background(), cid, seqs)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// validate
|
||||
for idx, msg := range inputMsgs {
|
||||
key := cacher.getMessageCacheKey(cid, msg.Seq)
|
||||
val, err := rdb.Exists(context.Background(), key).Result()
|
||||
assert.Nil(t, err)
|
||||
if idx < 90 {
|
||||
assert.EqualValues(t, 0, val) // not exists
|
||||
continue
|
||||
}
|
||||
|
||||
assert.EqualValues(t, 1, val) // exists
|
||||
}
|
||||
}
|
||||
|
||||
func TestParallelDeleteMessages(t *testing.T) {
|
||||
var (
|
||||
cid = fmt.Sprintf("cid-%v", rand.Int63())
|
||||
seqFirst = rand.Int63()
|
||||
msgs = []*sdkws.MsgData{}
|
||||
)
|
||||
|
||||
var seqs []int64
|
||||
for i := 0; i < 100; i++ {
|
||||
msgs = append(msgs, &sdkws.MsgData{
|
||||
Seq: seqFirst + int64(i),
|
||||
})
|
||||
seqs = append(seqs, msgs[i].Seq)
|
||||
}
|
||||
|
||||
randSeqs := []int64{}
|
||||
for i := seqFirst + 100; i < seqFirst+200; i++ {
|
||||
randSeqs = append(randSeqs, i)
|
||||
}
|
||||
|
||||
testParallelSetMessageToCache(t, cid, msgs)
|
||||
testParallelDeleteMessagesOK(t, cid, seqs, msgs)
|
||||
|
||||
// set again
|
||||
testParallelSetMessageToCache(t, cid, msgs)
|
||||
testParallelDeleteMessagesMix(t, cid, seqs[:90], msgs, 90)
|
||||
testParallelDeleteMessagesOK(t, cid, seqs[90:], msgs[:90])
|
||||
|
||||
// set again
|
||||
testParallelSetMessageToCache(t, cid, msgs)
|
||||
testParallelDeleteMessagesMix(t, cid, randSeqs, msgs, 0)
|
||||
}
|
||||
|
||||
func testParallelDeleteMessagesOK(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
|
||||
rdb := redis.NewClient(&redis.Options{})
|
||||
defer rdb.Close()
|
||||
|
||||
cacher := msgCache{rdb: rdb}
|
||||
|
||||
err := cacher.PipeDeleteMessages(context.Background(), cid, seqs)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// validate
|
||||
for _, msg := range inputMsgs {
|
||||
key := cacher.getMessageCacheKey(cid, msg.Seq)
|
||||
val := rdb.Exists(context.Background(), key).Val()
|
||||
assert.EqualValues(t, 0, val)
|
||||
}
|
||||
}
|
||||
|
||||
func testParallelDeleteMessagesMix(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData, lessValNonExists int) {
|
||||
rdb := redis.NewClient(&redis.Options{})
|
||||
defer rdb.Close()
|
||||
|
||||
cacher := msgCache{rdb: rdb}
|
||||
|
||||
err := cacher.PipeDeleteMessages(context.Background(), cid, seqs)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// validate
|
||||
for idx, msg := range inputMsgs {
|
||||
key := cacher.getMessageCacheKey(cid, msg.Seq)
|
||||
val, err := rdb.Exists(context.Background(), key).Result()
|
||||
assert.Nil(t, err)
|
||||
if idx < lessValNonExists {
|
||||
assert.EqualValues(t, 0, val) // not exists
|
||||
continue
|
||||
}
|
||||
|
||||
assert.EqualValues(t, 1, val) // exists
|
||||
}
|
||||
}
|
||||
pkg/common/db/cache/third.go (vendored)
@ -1,85 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ThirdCache interface {
|
||||
SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error)
|
||||
GetFcmToken(ctx context.Context, account string, platformID int) (string, error)
|
||||
DelFcmToken(ctx context.Context, account string, platformID int) error
|
||||
IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error)
|
||||
SetUserBadgeUnreadCountSum(ctx context.Context, userID string, value int) error
|
||||
GetUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error)
|
||||
SetGetuiToken(ctx context.Context, token string, expireTime int64) error
|
||||
GetGetuiToken(ctx context.Context) (string, error)
|
||||
SetGetuiTaskID(ctx context.Context, taskID string, expireTime int64) error
|
||||
GetGetuiTaskID(ctx context.Context) (string, error)
|
||||
}
|
||||
|
||||
func NewThirdCache(rdb redis.UniversalClient) ThirdCache {
|
||||
return &thirdCache{rdb: rdb}
|
||||
}
|
||||
|
||||
type thirdCache struct {
|
||||
rdb redis.UniversalClient
|
||||
}
|
||||
|
||||
func (c *thirdCache) SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error) {
|
||||
return errs.Wrap(c.rdb.Set(ctx, FCM_TOKEN+account+":"+strconv.Itoa(platformID), fcmToken, time.Duration(expireTime)*time.Second).Err())
|
||||
}
|
||||
|
||||
func (c *thirdCache) GetFcmToken(ctx context.Context, account string, platformID int) (string, error) {
|
||||
val, err := c.rdb.Get(ctx, FCM_TOKEN+account+":"+strconv.Itoa(platformID)).Result()
|
||||
if err != nil {
|
||||
return "", errs.Wrap(err)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (c *thirdCache) DelFcmToken(ctx context.Context, account string, platformID int) error {
|
||||
return errs.Wrap(c.rdb.Del(ctx, FCM_TOKEN+account+":"+strconv.Itoa(platformID)).Err())
|
||||
}
|
||||
|
||||
func (c *thirdCache) IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
|
||||
seq, err := c.rdb.Incr(ctx, userBadgeUnreadCountSum+userID).Result()
|
||||
|
||||
return int(seq), errs.Wrap(err)
|
||||
}
|
||||
|
||||
func (c *thirdCache) SetUserBadgeUnreadCountSum(ctx context.Context, userID string, value int) error {
|
||||
return errs.Wrap(c.rdb.Set(ctx, userBadgeUnreadCountSum+userID, value, 0).Err())
|
||||
}
|
||||
|
||||
func (c *thirdCache) GetUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
|
||||
val, err := c.rdb.Get(ctx, userBadgeUnreadCountSum+userID).Int()
|
||||
return val, errs.Wrap(err)
|
||||
}
|
||||
|
||||
func (c *thirdCache) SetGetuiToken(ctx context.Context, token string, expireTime int64) error {
|
||||
return errs.Wrap(c.rdb.Set(ctx, getuiToken, token, time.Duration(expireTime)*time.Second).Err())
|
||||
}
|
||||
|
||||
func (c *thirdCache) GetGetuiToken(ctx context.Context) (string, error) {
|
||||
val, err := c.rdb.Get(ctx, getuiToken).Result()
|
||||
if err != nil {
|
||||
return "", errs.Wrap(err)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (c *thirdCache) SetGetuiTaskID(ctx context.Context, taskID string, expireTime int64) error {
|
||||
return errs.Wrap(c.rdb.Set(ctx, getuiTaskID, taskID, time.Duration(expireTime)*time.Second).Err())
|
||||
}
|
||||
|
||||
func (c *thirdCache) GetGetuiTaskID(ctx context.Context) (string, error) {
|
||||
val, err := c.rdb.Get(ctx, getuiTaskID).Result()
|
||||
if err != nil {
|
||||
return "", errs.Wrap(err)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
@ -1,15 +0,0 @@
|
||||
// Copyright © 2024 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package relation // import "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
|
||||
@ -52,12 +52,9 @@ func Start[T any](ctx context.Context, discovery *config2.Discovery, prometheusC
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
prometheusPort, err := datautil.GetElemByIndex(prometheusConfig.Ports, index)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.CInfo(ctx, "RPC server is initializing", "rpcRegisterName", rpcRegisterName, "rpcPort", rpcPort,
|
||||
"prometheusPort", prometheusPort)
|
||||
"prometheusPorts", prometheusConfig.Ports)
|
||||
rpcTcpAddr := net.JoinHostPort(network.GetListenIP(listenIP), strconv.Itoa(rpcPort))
|
||||
listener, err := net.Listen(
|
||||
"tcp",
|
||||
@ -117,9 +114,14 @@ func Start[T any](ctx context.Context, discovery *config2.Discovery, prometheusC
|
||||
netErr error
|
||||
httpServer *http.Server
|
||||
)
|
||||
|
||||
go func() {
|
||||
if prometheusConfig.Enable && prometheusPort != 0 {
|
||||
if prometheusConfig.Enable {
|
||||
go func() {
|
||||
prometheusPort, err := datautil.GetElemByIndex(prometheusConfig.Ports, index)
|
||||
if err != nil {
|
||||
netErr = err
|
||||
netDone <- struct{}{}
|
||||
return
|
||||
}
|
||||
metric.InitializeMetrics(srv)
|
||||
// Create an HTTP server for Prometheus.
|
||||
httpServer = &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf("0.0.0.0:%d", prometheusPort)}
|
||||
@ -127,8 +129,8 @@ func Start[T any](ctx context.Context, discovery *config2.Discovery, prometheusC
|
||||
netErr = errs.WrapMsg(err, "prometheus start err", httpServer.Addr)
|
||||
netDone <- struct{}{}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}()
|
||||
}
|
||||
|
||||
go func() {
|
||||
err := srv.Serve(listener)
|
||||
|
||||
pkg/common/storage/cache/batch_handler.go (vendored, new file)
@ -0,0 +1,17 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// BatchDeleter interface defines a set of methods for batch deleting cache and publishing deletion information.
type BatchDeleter interface {
	// ChainExecDel is used for chain calls and must call Clone to prevent memory pollution.
	ChainExecDel(ctx context.Context) error
	// ExecDelWithKeys directly takes keys for deletion.
	ExecDelWithKeys(ctx context.Context, keys []string) error
	// Clone creates a copy of the BatchDeleter to avoid modifying the original object.
	Clone() BatchDeleter
	// AddKeys adds keys to be deleted.
	AddKeys(keys ...string)
}
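
As a minimal usage sketch (not taken from this PR's call sites), a caller would typically Clone the shared deleter, queue keys, and then execute the chain; the helper name below is hypothetical:

package example

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
)

// delUserCaches queues per-user keys on a cloned deleter and removes them in one batch.
func delUserCaches(ctx context.Context, deleter cache.BatchDeleter, userIDs ...string) error {
	d := deleter.Clone() // clone first so the shared instance's key list is not polluted
	for _, userID := range userIDs {
		d.AddKeys("USER_INFO:" + userID) // illustrative key prefix only
	}
	return d.ChainExecDel(ctx)
}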
|
||||
pkg/common/storage/cache/black.go (vendored, new file)
@ -0,0 +1,27 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
type BlackCache interface {
|
||||
BatchDeleter
|
||||
CloneBlackCache() BlackCache
|
||||
GetBlackIDs(ctx context.Context, userID string) (blackIDs []string, err error)
|
||||
// DelBlackIDs deletes a user's cached black list; call it when the user's black list changes
|
||||
DelBlackIDs(ctx context.Context, userID string) BlackCache
|
||||
}
|
||||
@ -12,4 +12,4 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cachekey // import "github.com/openimsdk/open-im-server/v3/pkg/common/cachekey"
|
||||
package cachekey // import "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cachekey"
|
||||
pkg/common/storage/cache/cachekey/msg.go (vendored, new file)
@ -0,0 +1,66 @@
|
||||
// Copyright © 2024 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cachekey
|
||||
|
||||
import (
|
||||
"github.com/openimsdk/protocol/constant"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const (
|
||||
messageCache = "MESSAGE_CACHE:"
|
||||
messageDelUserList = "MESSAGE_DEL_USER_LIST:"
|
||||
userDelMessagesList = "USER_DEL_MESSAGES_LIST:"
|
||||
sendMsgFailedFlag = "SEND_MSG_FAILED_FLAG:"
|
||||
exTypeKeyLocker = "EX_LOCK:"
|
||||
reactionExSingle = "EX_SINGLE_"
|
||||
reactionWriteGroup = "EX_GROUP_"
|
||||
reactionReadGroup = "EX_SUPER_GROUP_"
|
||||
reactionNotification = "EX_NOTIFICATION_"
|
||||
)
|
||||
|
||||
func GetMessageCacheKey(conversationID string, seq int64) string {
|
||||
return messageCache + conversationID + "_" + strconv.Itoa(int(seq))
|
||||
}
|
||||
|
||||
func GetMessageDelUserListKey(conversationID string, seq int64) string {
|
||||
return messageDelUserList + conversationID + ":" + strconv.Itoa(int(seq))
|
||||
}
|
||||
|
||||
func GetUserDelListKey(conversationID, userID string) string {
|
||||
return userDelMessagesList + conversationID + ":" + userID
|
||||
}
|
||||
|
||||
func GetMessageReactionExKey(clientMsgID string, sessionType int32) string {
|
||||
switch sessionType {
|
||||
case constant.SingleChatType:
|
||||
return reactionExSingle + clientMsgID
|
||||
case constant.WriteGroupChatType:
|
||||
return reactionWriteGroup + clientMsgID
|
||||
case constant.ReadGroupChatType:
|
||||
return reactionReadGroup + clientMsgID
|
||||
case constant.NotificationChatType:
|
||||
return reactionNotification + clientMsgID
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
func GetLockMessageTypeKey(clientMsgID string, TypeKey string) string {
|
||||
return exTypeKeyLocker + clientMsgID + "_" + TypeKey
|
||||
}
|
||||
|
||||
func GetSendMsgKey(id string) string {
|
||||
return sendMsgFailedFlag + id
|
||||
}
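
For quick reference, a sketch of the key shapes these helpers produce (the conversation, user, and message IDs are made up):

package example

import "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"

// exampleKeys shows the Redis key layout produced by the helpers above.
func exampleKeys() []string {
	return []string{
		cachekey.GetMessageCacheKey("si_100_200", 42),       // "MESSAGE_CACHE:si_100_200_42"
		cachekey.GetMessageDelUserListKey("si_100_200", 42), // "MESSAGE_DEL_USER_LIST:si_100_200:42"
		cachekey.GetUserDelListKey("si_100_200", "u1"),      // "USER_DEL_MESSAGES_LIST:si_100_200:u1"
		cachekey.GetSendMsgKey("msg-id-1"),                  // "SEND_MSG_FAILED_FLAG:msg-id-1"
	}
}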
|
||||
pkg/common/storage/cache/cachekey/s3.go (vendored, new file)
@ -0,0 +1,40 @@
|
||||
// Copyright © 2024 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cachekey
|
||||
|
||||
import "strconv"
|
||||
|
||||
const (
|
||||
object = "OBJECT:"
|
||||
s3 = "S3:"
|
||||
minioImageInfo = "MINIO:IMAGE:"
|
||||
minioThumbnail = "MINIO:THUMBNAIL:"
|
||||
)
|
||||
|
||||
func GetObjectKey(engine string, name string) string {
|
||||
return object + engine + ":" + name
|
||||
}
|
||||
|
||||
func GetS3Key(engine string, name string) string {
|
||||
return s3 + engine + ":" + name
|
||||
}
|
||||
|
||||
func GetObjectImageInfoKey(key string) string {
|
||||
return minioImageInfo + key
|
||||
}
|
||||
|
||||
func GetMinioImageThumbnailKey(key string, format string, width int, height int) string {
|
||||
return minioThumbnail + format + ":w" + strconv.Itoa(width) + ":h" + strconv.Itoa(height) + ":" + key
|
||||
}
|
||||
pkg/common/storage/cache/cachekey/seq.go (vendored, new file)
@ -0,0 +1,38 @@
|
||||
// Copyright © 2024 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cachekey
|
||||
|
||||
const (
|
||||
maxSeq = "MAX_SEQ:"
|
||||
minSeq = "MIN_SEQ:"
|
||||
conversationUserMinSeq = "CON_USER_MIN_SEQ:"
|
||||
hasReadSeq = "HAS_READ_SEQ:"
|
||||
)
|
||||
|
||||
func GetMaxSeqKey(conversationID string) string {
|
||||
return maxSeq + conversationID
|
||||
}
|
||||
|
||||
func GetMinSeqKey(conversationID string) string {
|
||||
return minSeq + conversationID
|
||||
}
|
||||
|
||||
func GetHasReadSeqKey(conversationID string, userID string) string {
|
||||
return hasReadSeq + userID + ":" + conversationID
|
||||
}
|
||||
|
||||
func GetConversationUserMinSeqKey(conversationID, userID string) string {
|
||||
return conversationUserMinSeq + conversationID + "u:" + userID
|
||||
}
|
||||
pkg/common/storage/cache/cachekey/third.go (vendored, new file)
@ -0,0 +1,41 @@
|
||||
// Copyright © 2024 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cachekey
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const (
|
||||
getuiToken = "GETUI_TOKEN"
|
||||
getuiTaskID = "GETUI_TASK_ID"
|
||||
fmcToken = "FCM_TOKEN:"
|
||||
userBadgeUnreadCountSum = "USER_BADGE_UNREAD_COUNT_SUM:"
|
||||
)
|
||||
|
||||
func GetFcmAccountTokenKey(account string, platformID int) string {
|
||||
return fmcToken + account + ":" + strconv.Itoa(platformID)
|
||||
}
|
||||
|
||||
func GetUserBadgeUnreadCountSumKey(userID string) string {
|
||||
return userBadgeUnreadCountSum + userID
|
||||
}
|
||||
|
||||
func GetGetuiTokenKey() string {
|
||||
return getuiToken
|
||||
}
|
||||
func GetGetuiTaskIDKey() string {
|
||||
return getuiTaskID
|
||||
}
|
||||
@ -17,6 +17,7 @@ package cachekey
|
||||
const (
|
||||
UserInfoKey = "USER_INFO:"
|
||||
UserGlobalRecvMsgOptKey = "USER_GLOBAL_RECV_MSG_OPT_KEY:"
|
||||
olineStatusKey = "ONLINE_STATUS:"
|
||||
)
|
||||
|
||||
func GetUserInfoKey(userID string) string {
|
||||
@ -26,3 +27,7 @@ func GetUserInfoKey(userID string) string {
|
||||
func GetUserGlobalRecvMsgOptKey(userID string) string {
|
||||
return UserGlobalRecvMsgOptKey + userID
|
||||
}
|
||||
|
||||
func GetOnlineStatusKey(modKey string) string {
|
||||
return olineStatusKey + modKey
|
||||
}
|
||||
pkg/common/storage/cache/conversation.go (vendored, new file)
@ -0,0 +1,57 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
|
||||
)
|
||||
|
||||
// The fn argument is executed when there is no data in the cache.
|
||||
type ConversationCache interface {
|
||||
BatchDeleter
|
||||
CloneConversationCache() ConversationCache
|
||||
// get user's conversationIDs from msgCache
|
||||
GetUserConversationIDs(ctx context.Context, ownerUserID string) ([]string, error)
|
||||
DelConversationIDs(userIDs ...string) ConversationCache
|
||||
|
||||
GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error)
|
||||
DelUserConversationIDsHash(ownerUserIDs ...string) ConversationCache
|
||||
|
||||
// get one conversation from msgCache
|
||||
GetConversation(ctx context.Context, ownerUserID, conversationID string) (*relationtb.Conversation, error)
|
||||
DelConversations(ownerUserID string, conversationIDs ...string) ConversationCache
|
||||
DelUsersConversation(conversationID string, ownerUserIDs ...string) ConversationCache
|
||||
// get one conversation from msgCache
|
||||
GetConversations(ctx context.Context, ownerUserID string,
|
||||
conversationIDs []string) ([]*relationtb.Conversation, error)
|
||||
// get one user's all conversations from msgCache
|
||||
GetUserAllConversations(ctx context.Context, ownerUserID string) ([]*relationtb.Conversation, error)
|
||||
// get user conversation recv msg from msgCache
|
||||
GetUserRecvMsgOpt(ctx context.Context, ownerUserID, conversationID string) (opt int, err error)
|
||||
DelUserRecvMsgOpt(ownerUserID, conversationID string) ConversationCache
|
||||
// get one super group recv msg but do not notification userID list
|
||||
// GetSuperGroupRecvMsgNotNotifyUserIDs(ctx context.Context, groupID string) (userIDs []string, err error)
|
||||
DelSuperGroupRecvMsgNotNotifyUserIDs(groupID string) ConversationCache
|
||||
// get one super group recv msg but do not notification userID list hash
|
||||
// GetSuperGroupRecvMsgNotNotifyUserIDsHash(ctx context.Context, groupID string) (hash uint64, err error)
|
||||
DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID string) ConversationCache
|
||||
|
||||
// GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error)
|
||||
DelUserAllHasReadSeqs(ownerUserID string, conversationIDs ...string) ConversationCache
|
||||
|
||||
GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error)
|
||||
DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) ConversationCache
|
||||
}
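
A small sketch (assumed usage, not code from this PR) of how the fluent Del* methods compose with the embedded BatchDeleter: clone, queue invalidations, then execute once:

package example

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
)

// invalidateUserConversation clones the shared cache, queues several related
// deletions, and flushes them as one batch via the embedded BatchDeleter.
func invalidateUserConversation(ctx context.Context, c cache.ConversationCache, ownerUserID, conversationID string) error {
	return c.CloneConversationCache().
		DelConversationIDs(ownerUserID).
		DelUserConversationIDsHash(ownerUserID).
		DelConversations(ownerUserID, conversationID).
		ChainExecDel(ctx)
}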
|
||||
@ -12,4 +12,4 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache // import "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
|
||||
package cache // import "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
|
||||
pkg/common/storage/cache/friend.go (vendored, new file)
@ -0,0 +1,35 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
|
||||
)
|
||||
|
||||
// FriendCache is an interface for caching friend-related data.
|
||||
type FriendCache interface {
|
||||
BatchDeleter
|
||||
CloneFriendCache() FriendCache
|
||||
GetFriendIDs(ctx context.Context, ownerUserID string) (friendIDs []string, err error)
|
||||
// Called when friendID list changed
|
||||
DelFriendIDs(ownerUserID ...string) FriendCache
|
||||
// Get single friendInfo from the cache
|
||||
GetFriend(ctx context.Context, ownerUserID, friendUserID string) (friend *relationtb.Friend, err error)
|
||||
// Delete friend when friend info changed
|
||||
DelFriend(ownerUserID, friendUserID string) FriendCache
|
||||
// Delete friends when friends' info changed
|
||||
DelFriends(ownerUserID string, friendUserIDs []string) FriendCache
|
||||
}
|
||||
pkg/common/storage/cache/group.go (vendored, new file)
@ -0,0 +1,62 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/common"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
|
||||
)
|
||||
|
||||
type GroupHash interface {
|
||||
GetGroupHash(ctx context.Context, groupID string) (uint64, error)
|
||||
}
|
||||
|
||||
type GroupCache interface {
|
||||
BatchDeleter
|
||||
CloneGroupCache() GroupCache
|
||||
GetGroupsInfo(ctx context.Context, groupIDs []string) (groups []*model.Group, err error)
|
||||
GetGroupInfo(ctx context.Context, groupID string) (group *model.Group, err error)
|
||||
DelGroupsInfo(groupIDs ...string) GroupCache
|
||||
|
||||
GetGroupMembersHash(ctx context.Context, groupID string) (hashCode uint64, err error)
|
||||
GetGroupMemberHashMap(ctx context.Context, groupIDs []string) (map[string]*common.GroupSimpleUserID, error)
|
||||
DelGroupMembersHash(groupID string) GroupCache
|
||||
|
||||
GetGroupMemberIDs(ctx context.Context, groupID string) (groupMemberIDs []string, err error)
|
||||
GetGroupsMemberIDs(ctx context.Context, groupIDs []string) (groupMemberIDs map[string][]string, err error)
|
||||
|
||||
DelGroupMemberIDs(groupID string) GroupCache
|
||||
|
||||
GetJoinedGroupIDs(ctx context.Context, userID string) (joinedGroupIDs []string, err error)
|
||||
DelJoinedGroupID(userID ...string) GroupCache
|
||||
|
||||
GetGroupMemberInfo(ctx context.Context, groupID, userID string) (groupMember *model.GroupMember, err error)
|
||||
GetGroupMembersInfo(ctx context.Context, groupID string, userID []string) (groupMembers []*model.GroupMember, err error)
|
||||
GetAllGroupMembersInfo(ctx context.Context, groupID string) (groupMembers []*model.GroupMember, err error)
|
||||
GetGroupMembersPage(ctx context.Context, groupID string, userID []string, showNumber, pageNumber int32) (total uint32, groupMembers []*model.GroupMember, err error)
|
||||
FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) ([]*model.GroupMember, error)
|
||||
|
||||
GetGroupRoleLevelMemberIDs(ctx context.Context, groupID string, roleLevel int32) ([]string, error)
|
||||
GetGroupOwner(ctx context.Context, groupID string) (*model.GroupMember, error)
|
||||
GetGroupsOwner(ctx context.Context, groupIDs []string) ([]*model.GroupMember, error)
|
||||
DelGroupRoleLevel(groupID string, roleLevel []int32) GroupCache
|
||||
DelGroupAllRoleLevel(groupID string) GroupCache
|
||||
DelGroupMembersInfo(groupID string, userID ...string) GroupCache
|
||||
GetGroupRoleLevelMemberInfo(ctx context.Context, groupID string, roleLevel int32) ([]*model.GroupMember, error)
|
||||
GetGroupRolesLevelMemberInfo(ctx context.Context, groupID string, roleLevels []int32) ([]*model.GroupMember, error)
|
||||
GetGroupMemberNum(ctx context.Context, groupID string) (memberNum int64, err error)
|
||||
DelGroupsMemberNum(groupID ...string) GroupCache
|
||||
}
|
||||
pkg/common/storage/cache/msg.go (vendored, new file)
@ -0,0 +1,38 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/openimsdk/protocol/sdkws"
|
||||
)
|
||||
|
||||
type MsgCache interface {
|
||||
GetMessagesBySeq(ctx context.Context, conversationID string, seqs []int64) (seqMsg []*sdkws.MsgData, failedSeqList []int64, err error)
|
||||
SetMessagesToCache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (int, error)
|
||||
DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error
|
||||
SetSendMsgStatus(ctx context.Context, id string, status int32) error
|
||||
GetSendMsgStatus(ctx context.Context, id string) (int32, error)
|
||||
JudgeMessageReactionExist(ctx context.Context, clientMsgID string, sessionType int32) (bool, error)
|
||||
GetOneMessageAllReactionList(ctx context.Context, clientMsgID string, sessionType int32) (map[string]string, error)
|
||||
DeleteOneMessageKey(ctx context.Context, clientMsgID string, sessionType int32, subKey string) error
|
||||
SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error)
|
||||
GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error)
|
||||
SetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey, value string) error
|
||||
LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error
|
||||
UnLockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error
|
||||
}
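
As a hedged sketch of the intended read path (the DB fallback function here is hypothetical): take whatever the cache returns and re-fetch only the seqs it reports as failed:

package example

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/protocol/sdkws"
)

// fetchMsgs reads from the cache first and falls back to a persistent store
// for the seqs the cache reports as failed (missing or marked deleted).
func fetchMsgs(ctx context.Context, mc cache.MsgCache, loadFromDB func(context.Context, string, []int64) ([]*sdkws.MsgData, error),
	conversationID string, seqs []int64) ([]*sdkws.MsgData, error) {
	cached, failedSeqs, err := mc.GetMessagesBySeq(ctx, conversationID, seqs)
	if err != nil {
		return nil, err
	}
	if len(failedSeqs) == 0 {
		return cached, nil
	}
	fromDB, err := loadFromDB(ctx, conversationID, failedSeqs) // hypothetical DB fallback
	if err != nil {
		return nil, err
	}
	return append(cached, fromDB...), nil
}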
|
||||
pkg/common/storage/cache/redis/batch_handler.go (vendored, new file)
@ -0,0 +1,176 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/dtm-labs/rockscache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/mw/specialerror"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BatchDeleterRedis is a concrete implementation of the BatchDeleter interface based on Redis and RocksCache.
|
||||
type BatchDeleterRedis struct {
|
||||
redisClient redis.UniversalClient
|
||||
keys []string
|
||||
rocksClient *rockscache.Client
|
||||
redisPubTopics []string
|
||||
}
|
||||
|
||||
// NewBatchDeleterRedis creates a new BatchDeleterRedis instance.
|
||||
func NewBatchDeleterRedis(redisClient redis.UniversalClient, options *rockscache.Options, redisPubTopics []string) *BatchDeleterRedis {
|
||||
return &BatchDeleterRedis{
|
||||
redisClient: redisClient,
|
||||
rocksClient: rockscache.NewClient(redisClient, *options),
|
||||
redisPubTopics: redisPubTopics,
|
||||
}
|
||||
}
|
||||
|
||||
// ExecDelWithKeys directly takes keys for batch deletion and publishes deletion information.
|
||||
func (c *BatchDeleterRedis) ExecDelWithKeys(ctx context.Context, keys []string) error {
|
||||
distinctKeys := datautil.Distinct(keys)
|
||||
return c.execDel(ctx, distinctKeys)
|
||||
}
|
||||
|
||||
// ChainExecDel is used for chain calls for batch deletion. It must call Clone to prevent memory pollution.
|
||||
func (c *BatchDeleterRedis) ChainExecDel(ctx context.Context) error {
|
||||
distinctKeys := datautil.Distinct(c.keys)
|
||||
return c.execDel(ctx, distinctKeys)
|
||||
}
|
||||
|
||||
// execDel performs batch deletion and publishes the keys that have been deleted to update the local cache information of other nodes.
|
||||
func (c *BatchDeleterRedis) execDel(ctx context.Context, keys []string) error {
|
||||
if len(keys) > 0 {
|
||||
log.ZDebug(ctx, "delete cache", "topic", c.redisPubTopics, "keys", keys)
|
||||
// Batch delete keys
|
||||
err := ProcessKeysBySlot(ctx, c.redisClient, keys, func(ctx context.Context, slot int64, keys []string) error {
|
||||
return c.rocksClient.TagAsDeletedBatch2(ctx, keys)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Publish the keys that have been deleted to Redis to update the local cache information of other nodes
|
||||
if len(c.redisPubTopics) > 0 && len(keys) > 0 {
|
||||
keysByTopic := localcache.GetPublishKeysByTopic(c.redisPubTopics, keys)
|
||||
for topic, keys := range keysByTopic {
|
||||
if len(keys) > 0 {
|
||||
data, err := json.Marshal(keys)
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "keys json marshal failed", err, "topic", topic, "keys", keys)
|
||||
} else {
|
||||
if err := c.redisClient.Publish(ctx, topic, string(data)).Err(); err != nil {
|
||||
log.ZWarn(ctx, "redis publish cache delete error", err, "topic", topic, "keys", keys)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clone creates a copy of BatchDeleterRedis for chain calls to prevent memory pollution.
|
||||
func (c *BatchDeleterRedis) Clone() cache.BatchDeleter {
|
||||
return &BatchDeleterRedis{
|
||||
redisClient: c.redisClient,
|
||||
keys: c.keys,
|
||||
rocksClient: c.rocksClient,
|
||||
redisPubTopics: c.redisPubTopics,
|
||||
}
|
||||
}
|
||||
|
||||
// AddKeys adds keys to be deleted.
|
||||
func (c *BatchDeleterRedis) AddKeys(keys ...string) {
|
||||
c.keys = append(c.keys, keys...)
|
||||
}
|
||||
|
||||
// GetRocksCacheOptions returns the default configuration options for RocksCache.
|
||||
func GetRocksCacheOptions() *rockscache.Options {
|
||||
opts := rockscache.NewDefaultOptions()
|
||||
opts.StrongConsistency = true
|
||||
opts.RandomExpireAdjustment = 0.2
|
||||
|
||||
return &opts
|
||||
}
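
Putting the two helpers above together, a minimal construction sketch (the pub/sub topic is a placeholder; in this PR it normally comes from the local-cache config):

package example

import (
	cacheredis "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
	"github.com/redis/go-redis/v9"
)

// newDeleter wires a BatchDeleterRedis with the default RocksCache options.
func newDeleter(rdb redis.UniversalClient) *cacheredis.BatchDeleterRedis {
	return cacheredis.NewBatchDeleterRedis(rdb, cacheredis.GetRocksCacheOptions(), []string{"friend-topic"})
}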
|
||||
|
||||
func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key string, expire time.Duration, fn func(ctx context.Context) (T, error)) (T, error) {
|
||||
var t T
|
||||
var write bool
|
||||
v, err := rcClient.Fetch2(ctx, key, expire, func() (s string, err error) {
|
||||
t, err = fn(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
bs, err := json.Marshal(t)
|
||||
if err != nil {
|
||||
return "", errs.WrapMsg(err, "marshal failed")
|
||||
}
|
||||
write = true
|
||||
|
||||
return string(bs), nil
|
||||
})
|
||||
if err != nil {
|
||||
return t, errs.Wrap(err)
|
||||
}
|
||||
if write {
|
||||
return t, nil
|
||||
}
|
||||
if v == "" {
|
||||
return t, errs.ErrRecordNotFound.WrapMsg("cache is not found")
|
||||
}
|
||||
err = json.Unmarshal([]byte(v), &t)
|
||||
if err != nil {
|
||||
errInfo := fmt.Sprintf("cache json.Unmarshal failed, key:%s, value:%s, expire:%s", key, v, expire)
|
||||
return t, errs.WrapMsg(err, errInfo)
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func batchGetCache[T any, K comparable](
|
||||
ctx context.Context,
|
||||
rcClient *rockscache.Client,
|
||||
expire time.Duration,
|
||||
keys []K,
|
||||
keyFn func(key K) string,
|
||||
fns func(ctx context.Context, key K) (T, error),
|
||||
) ([]T, error) {
|
||||
if len(keys) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
res := make([]T, 0, len(keys))
|
||||
for _, key := range keys {
|
||||
val, err := getCache(ctx, rcClient, keyFn(key), expire, func(ctx context.Context) (T, error) {
|
||||
return fns(ctx, key)
|
||||
})
|
||||
if err != nil {
|
||||
if errs.ErrRecordNotFound.Is(specialerror.ErrCode(errs.Unwrap(err))) {
|
||||
continue
|
||||
}
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
res = append(res, val)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
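A minimal usage sketch (not part of this commit) of the generic getCache helper above: the loader callback only runs on a cache miss, its result is JSON-encoded into rockscache, and a freshly written value is returned without a second unmarshal. The exampleCache type, the key prefix, and the loadUserName loader are hypothetical names assumed to live in this same package.

type exampleCache struct {
    rcClient *rockscache.Client
}

func (c *exampleCache) loadUserName(ctx context.Context, userID string) (string, error) {
    // Stand-in for a real database lookup; only runs when the key is absent from Redis.
    return "user-" + userID, nil
}

func (c *exampleCache) GetUserName(ctx context.Context, userID string) (string, error) {
    return getCache(ctx, c.rcClient, "EXAMPLE_USER_NAME:"+userID, time.Hour*12, func(ctx context.Context) (string, error) {
        return c.loadUserName(ctx, userID)
    })
}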
@ -12,63 +12,49 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package cache
package redis

import (
"context"
"time"

"github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9"
"time"
)

const (
blackIDsKey = "BLACK_IDS:"
blackExpireTime = time.Second * 60 * 60 * 12
)

// args fn will exec when no data in msgCache.
type BlackCache interface {
// get blackIDs from msgCache
metaCache
NewCache() BlackCache
GetBlackIDs(ctx context.Context, userID string) (blackIDs []string, err error)
// del user's blackIDs msgCache, exec when a user's black list changed
DelBlackIDs(ctx context.Context, userID string) BlackCache
}

type BlackCacheRedis struct {
metaCache
cache.BatchDeleter
expireTime time.Duration
rcClient *rockscache.Client
blackDB relationtb.BlackModelInterface
blackDB database.Black
}

func NewBlackCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, blackDB relationtb.BlackModelInterface, options rockscache.Options) BlackCache {
rcClient := rockscache.NewClient(rdb, options)
mc := NewMetaCacheRedis(rcClient)
func NewBlackCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, blackDB database.Black, options *rockscache.Options) cache.BlackCache {
batchHandler := NewBatchDeleterRedis(rdb, options, []string{localCache.Friend.Topic})
b := localCache.Friend
log.ZDebug(context.Background(), "black local cache init", "Topic", b.Topic, "SlotNum", b.SlotNum, "SlotSize", b.SlotSize, "enable", b.Enable())
mc.SetTopic(b.Topic)
mc.SetRawRedisClient(rdb)
return &BlackCacheRedis{
expireTime: blackExpireTime,
rcClient: rcClient,
metaCache: mc,
blackDB: blackDB,
BatchDeleter: batchHandler,
expireTime: blackExpireTime,
rcClient: rockscache.NewClient(rdb, *options),
blackDB: blackDB,
}
}

func (b *BlackCacheRedis) NewCache() BlackCache {
func (b *BlackCacheRedis) CloneBlackCache() cache.BlackCache {
return &BlackCacheRedis{
expireTime: b.expireTime,
rcClient: b.rcClient,
blackDB: b.blackDB,
metaCache: b.Copy(),
BatchDeleter: b.BatchDeleter.Clone(),
expireTime: b.expireTime,
rcClient: b.rcClient,
blackDB: b.blackDB,
}
}

@ -88,8 +74,8 @@ func (b *BlackCacheRedis) GetBlackIDs(ctx context.Context, userID string) (black
)
}

func (b *BlackCacheRedis) DelBlackIDs(ctx context.Context, userID string) BlackCache {
cache := b.NewCache()
func (b *BlackCacheRedis) DelBlackIDs(_ context.Context, userID string) cache.BlackCache {
cache := b.CloneBlackCache()
cache.AddKeys(b.getBlackIDsKey(userID))

return cache
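Illustrative call site (not part of this diff) for the delete chain used above: DelBlackIDs clones the cache, stages the key, and the staged keys are then flushed in one batch by the deleter's execution method. This sketch assumes cache.BlackCache embeds cache.BatchDeleter (as BlackCacheRedis does) and that ChainExecDel is the execution method; neither signature is shown in this hunk.

func invalidateBlackList(ctx context.Context, blackCache cache.BlackCache, userID string) error {
    // Clone-and-stage, then execute the batch delete (and local-cache publish) once.
    return blackCache.DelBlackIDs(ctx, userID).ChainExecDel(ctx)
}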
238 pkg/common/storage/cache/redis/conversation.go vendored Normal file
@ -0,0 +1,238 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package redis

import (
"context"
"github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/encrypt"
"github.com/redis/go-redis/v9"
"math/big"
"strings"
"time"
)

const (
conversationExpireTime = time.Second * 60 * 60 * 12
)

func NewConversationRedis(rdb redis.UniversalClient, localCache *config.LocalCache, opts *rockscache.Options, db database.Conversation) cache.ConversationCache {
batchHandler := NewBatchDeleterRedis(rdb, opts, []string{localCache.Conversation.Topic})
c := localCache.Conversation
log.ZDebug(context.Background(), "black local cache init", "Topic", c.Topic, "SlotNum", c.SlotNum, "SlotSize", c.SlotSize, "enable", c.Enable())
return &ConversationRedisCache{
BatchDeleter: batchHandler,
rcClient: rockscache.NewClient(rdb, *opts),
conversationDB: db,
expireTime: conversationExpireTime,
}
}

type ConversationRedisCache struct {
cache.BatchDeleter
rcClient *rockscache.Client
conversationDB database.Conversation
expireTime time.Duration
}

func (c *ConversationRedisCache) CloneConversationCache() cache.ConversationCache {
return &ConversationRedisCache{
BatchDeleter: c.BatchDeleter.Clone(),
rcClient: c.rcClient,
conversationDB: c.conversationDB,
expireTime: c.expireTime,
}
}

func (c *ConversationRedisCache) getConversationKey(ownerUserID, conversationID string) string {
return cachekey.GetConversationKey(ownerUserID, conversationID)
}

func (c *ConversationRedisCache) getConversationIDsKey(ownerUserID string) string {
return cachekey.GetConversationIDsKey(ownerUserID)
}

func (c *ConversationRedisCache) getSuperGroupRecvNotNotifyUserIDsKey(groupID string) string {
return cachekey.GetSuperGroupRecvNotNotifyUserIDsKey(groupID)
}

func (c *ConversationRedisCache) getRecvMsgOptKey(ownerUserID, conversationID string) string {
return cachekey.GetRecvMsgOptKey(ownerUserID, conversationID)
}

func (c *ConversationRedisCache) getSuperGroupRecvNotNotifyUserIDsHashKey(groupID string) string {
return cachekey.GetSuperGroupRecvNotNotifyUserIDsHashKey(groupID)
}

func (c *ConversationRedisCache) getConversationHasReadSeqKey(ownerUserID, conversationID string) string {
return cachekey.GetConversationHasReadSeqKey(ownerUserID, conversationID)
}

func (c *ConversationRedisCache) getConversationNotReceiveMessageUserIDsKey(conversationID string) string {
return cachekey.GetConversationNotReceiveMessageUserIDsKey(conversationID)
}

func (c *ConversationRedisCache) getUserConversationIDsHashKey(ownerUserID string) string {
return cachekey.GetUserConversationIDsHashKey(ownerUserID)
}

func (c *ConversationRedisCache) GetUserConversationIDs(ctx context.Context, ownerUserID string) ([]string, error) {
return getCache(ctx, c.rcClient, c.getConversationIDsKey(ownerUserID), c.expireTime, func(ctx context.Context) ([]string, error) {
return c.conversationDB.FindUserIDAllConversationID(ctx, ownerUserID)
})
}

func (c *ConversationRedisCache) DelConversationIDs(userIDs ...string) cache.ConversationCache {
keys := make([]string, 0, len(userIDs))
for _, userID := range userIDs {
keys = append(keys, c.getConversationIDsKey(userID))
}
cache := c.CloneConversationCache()
cache.AddKeys(keys...)

return cache
}

func (c *ConversationRedisCache) GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error) {
return getCache(
ctx,
c.rcClient,
c.getUserConversationIDsHashKey(ownerUserID),
c.expireTime,
func(ctx context.Context) (uint64, error) {
conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
if err != nil {
return 0, err
}
datautil.Sort(conversationIDs, true)
bi := big.NewInt(0)
bi.SetString(encrypt.Md5(strings.Join(conversationIDs, ";"))[0:8], 16)
return bi.Uint64(), nil
},
)
}

func (c *ConversationRedisCache) DelUserConversationIDsHash(ownerUserIDs ...string) cache.ConversationCache {
keys := make([]string, 0, len(ownerUserIDs))
for _, ownerUserID := range ownerUserIDs {
keys = append(keys, c.getUserConversationIDsHashKey(ownerUserID))
}
cache := c.CloneConversationCache()
cache.AddKeys(keys...)

return cache
}

func (c *ConversationRedisCache) GetConversation(ctx context.Context, ownerUserID, conversationID string) (*model.Conversation, error) {
return getCache(ctx, c.rcClient, c.getConversationKey(ownerUserID, conversationID), c.expireTime, func(ctx context.Context) (*model.Conversation, error) {
return c.conversationDB.Take(ctx, ownerUserID, conversationID)
})
}

func (c *ConversationRedisCache) DelConversations(ownerUserID string, conversationIDs ...string) cache.ConversationCache {
keys := make([]string, 0, len(conversationIDs))
for _, conversationID := range conversationIDs {
keys = append(keys, c.getConversationKey(ownerUserID, conversationID))
}
cache := c.CloneConversationCache()
cache.AddKeys(keys...)

return cache
}

func (c *ConversationRedisCache) GetConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*model.Conversation, error) {
return batchGetCache(ctx, c.rcClient, c.expireTime, conversationIDs, func(conversationID string) string {
return c.getConversationKey(ownerUserID, conversationID)
}, func(ctx context.Context, conversationID string) (*model.Conversation, error) {
return c.conversationDB.Take(ctx, ownerUserID, conversationID)
})
}

func (c *ConversationRedisCache) GetUserAllConversations(ctx context.Context, ownerUserID string) ([]*model.Conversation, error) {
conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
if err != nil {
return nil, err
}
return c.GetConversations(ctx, ownerUserID, conversationIDs)
}

func (c *ConversationRedisCache) GetUserRecvMsgOpt(ctx context.Context, ownerUserID, conversationID string) (opt int, err error) {
return getCache(ctx, c.rcClient, c.getRecvMsgOptKey(ownerUserID, conversationID), c.expireTime, func(ctx context.Context) (opt int, err error) {
return c.conversationDB.GetUserRecvMsgOpt(ctx, ownerUserID, conversationID)
})
}

func (c *ConversationRedisCache) DelUsersConversation(conversationID string, ownerUserIDs ...string) cache.ConversationCache {
keys := make([]string, 0, len(ownerUserIDs))
for _, ownerUserID := range ownerUserIDs {
keys = append(keys, c.getConversationKey(ownerUserID, conversationID))
}
cache := c.CloneConversationCache()
cache.AddKeys(keys...)

return cache
}

func (c *ConversationRedisCache) DelUserRecvMsgOpt(ownerUserID, conversationID string) cache.ConversationCache {
cache := c.CloneConversationCache()
cache.AddKeys(c.getRecvMsgOptKey(ownerUserID, conversationID))

return cache
}

func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDs(groupID string) cache.ConversationCache {
cache := c.CloneConversationCache()
cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsKey(groupID))

return cache
}

func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID string) cache.ConversationCache {
cache := c.CloneConversationCache()
cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsHashKey(groupID))

return cache
}

func (c *ConversationRedisCache) DelUserAllHasReadSeqs(ownerUserID string, conversationIDs ...string) cache.ConversationCache {
cache := c.CloneConversationCache()
for _, conversationID := range conversationIDs {
cache.AddKeys(c.getConversationHasReadSeqKey(ownerUserID, conversationID))
}

return cache
}

func (c *ConversationRedisCache) GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error) {
return getCache(ctx, c.rcClient, c.getConversationNotReceiveMessageUserIDsKey(conversationID), c.expireTime, func(ctx context.Context) ([]string, error) {
return c.conversationDB.GetConversationNotReceiveMessageUserIDs(ctx, conversationID)
})
}

func (c *ConversationRedisCache) DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) cache.ConversationCache {
cache := c.CloneConversationCache()
for _, conversationID := range conversationIDs {
cache.AddKeys(c.getConversationNotReceiveMessageUserIDsKey(conversationID))
}

return cache
}
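Standalone illustration (not part of this commit) of the hash used by GetUserConversationIDsHash above: sort the conversation IDs, join them with ";", take the first 8 hex characters of the MD5 digest, and read them as an integer. It assumes datautil.Sort(ids, true) sorts ascending and encrypt.Md5 returns a lowercase hex digest; the sample IDs are made up.

package main

import (
    "crypto/md5"
    "encoding/hex"
    "fmt"
    "sort"
    "strconv"
    "strings"
)

func conversationIDsHash(ids []string) uint64 {
    sort.Strings(ids)
    sum := md5.Sum([]byte(strings.Join(ids, ";")))
    // First 8 hex characters == top 32 bits of the digest, parsed as an unsigned integer.
    h, _ := strconv.ParseUint(hex.EncodeToString(sum[:])[:8], 16, 64)
    return h
}

func main() {
    fmt.Println(conversationIDsHash([]string{"si_a_b", "sg_team"}))
}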
15 pkg/common/storage/cache/redis/doc.go vendored Normal file
@ -0,0 +1,15 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package redis
@ -12,75 +12,54 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package cache
package redis

import (
"context"
"time"

"github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil"
"github.com/redis/go-redis/v9"
"time"
)

const (
friendExpireTime = time.Second * 60 * 60 * 12
// FriendIDsKey = "FRIEND_IDS:"
// TwoWayFriendsIDsKey = "COMMON_FRIENDS_IDS:"
// friendKey = "FRIEND_INFO:".
)

// FriendCache is an interface for caching friend-related data.
type FriendCache interface {
metaCache
NewCache() FriendCache
GetFriendIDs(ctx context.Context, ownerUserID string) (friendIDs []string, err error)
// Called when friendID list changed
DelFriendIDs(ownerUserID ...string) FriendCache
// Get single friendInfo from the cache
GetFriend(ctx context.Context, ownerUserID, friendUserID string) (friend *relationtb.FriendModel, err error)
// Delete friend when friend info changed
DelFriend(ownerUserID, friendUserID string) FriendCache
// Delete friends when friends' info changed
DelFriends(ownerUserID string, friendUserIDs []string) FriendCache
}

// FriendCacheRedis is an implementation of the FriendCache interface using Redis.
type FriendCacheRedis struct {
metaCache
friendDB relationtb.FriendModelInterface
cache.BatchDeleter
friendDB database.Friend
expireTime time.Duration
rcClient *rockscache.Client
}

// NewFriendCacheRedis creates a new instance of FriendCacheRedis.
func NewFriendCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, friendDB relationtb.FriendModelInterface,
options rockscache.Options) FriendCache {
rcClient := rockscache.NewClient(rdb, options)
mc := NewMetaCacheRedis(rcClient)
func NewFriendCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, friendDB database.Friend,
options *rockscache.Options) cache.FriendCache {
batchHandler := NewBatchDeleterRedis(rdb, options, []string{localCache.Friend.Topic})
f := localCache.Friend
log.ZDebug(context.Background(), "friend local cache init", "Topic", f.Topic, "SlotNum", f.SlotNum, "SlotSize", f.SlotSize, "enable", f.Enable())
mc.SetTopic(f.Topic)
mc.SetRawRedisClient(rdb)
return &FriendCacheRedis{
metaCache: mc,
friendDB: friendDB,
expireTime: friendExpireTime,
rcClient: rcClient,
BatchDeleter: batchHandler,
friendDB: friendDB,
expireTime: friendExpireTime,
rcClient: rockscache.NewClient(rdb, *options),
}
}

// NewCache creates a new instance of FriendCacheRedis with the same configuration.
func (f *FriendCacheRedis) NewCache() FriendCache {
func (f *FriendCacheRedis) CloneFriendCache() cache.FriendCache {
return &FriendCacheRedis{
rcClient: f.rcClient,
metaCache: f.Copy(),
friendDB: f.friendDB,
expireTime: f.expireTime,
BatchDeleter: f.BatchDeleter.Clone(),
friendDB: f.friendDB,
expireTime: f.expireTime,
rcClient: f.rcClient,
}
}

@ -107,15 +86,15 @@ func (f *FriendCacheRedis) GetFriendIDs(ctx context.Context, ownerUserID string)
}

// DelFriendIDs deletes friend IDs from the cache.
func (f *FriendCacheRedis) DelFriendIDs(ownerUserIDs ...string) FriendCache {
newGroupCache := f.NewCache()
func (f *FriendCacheRedis) DelFriendIDs(ownerUserIDs ...string) cache.FriendCache {
newFriendCache := f.CloneFriendCache()
keys := make([]string, 0, len(ownerUserIDs))
for _, userID := range ownerUserIDs {
keys = append(keys, f.getFriendIDsKey(userID))
}
newGroupCache.AddKeys(keys...)
newFriendCache.AddKeys(keys...)

return newGroupCache
return newFriendCache
}

// GetTwoWayFriendIDs retrieves two-way friend IDs from the cache.
@ -138,32 +117,32 @@ func (f *FriendCacheRedis) GetTwoWayFriendIDs(ctx context.Context, ownerUserID s
}

// DelTwoWayFriendIDs deletes two-way friend IDs from the cache.
func (f *FriendCacheRedis) DelTwoWayFriendIDs(ctx context.Context, ownerUserID string) FriendCache {
newFriendCache := f.NewCache()
func (f *FriendCacheRedis) DelTwoWayFriendIDs(ctx context.Context, ownerUserID string) cache.FriendCache {
newFriendCache := f.CloneFriendCache()
newFriendCache.AddKeys(f.getTwoWayFriendsIDsKey(ownerUserID))

return newFriendCache
}

// GetFriend retrieves friend info from the cache or the database if not found.
func (f *FriendCacheRedis) GetFriend(ctx context.Context, ownerUserID, friendUserID string) (friend *relationtb.FriendModel, err error) {
func (f *FriendCacheRedis) GetFriend(ctx context.Context, ownerUserID, friendUserID string) (friend *model.Friend, err error) {
return getCache(ctx, f.rcClient, f.getFriendKey(ownerUserID,
friendUserID), f.expireTime, func(ctx context.Context) (*relationtb.FriendModel, error) {
friendUserID), f.expireTime, func(ctx context.Context) (*model.Friend, error) {
return f.friendDB.Take(ctx, ownerUserID, friendUserID)
})
}

// DelFriend deletes friend info from the cache.
func (f *FriendCacheRedis) DelFriend(ownerUserID, friendUserID string) FriendCache {
newFriendCache := f.NewCache()
func (f *FriendCacheRedis) DelFriend(ownerUserID, friendUserID string) cache.FriendCache {
newFriendCache := f.CloneFriendCache()
newFriendCache.AddKeys(f.getFriendKey(ownerUserID, friendUserID))

return newFriendCache
}

// DelFriends deletes multiple friend infos from the cache.
func (f *FriendCacheRedis) DelFriends(ownerUserID string, friendUserIDs []string) FriendCache {
newFriendCache := f.NewCache()
func (f *FriendCacheRedis) DelFriends(ownerUserID string, friendUserIDs []string) cache.FriendCache {
newFriendCache := f.CloneFriendCache()

for _, friendUserID := range friendUserIDs {
key := f.getFriendKey(ownerUserID, friendUserID)
@ -12,110 +12,74 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package cache
package redis

import (
"context"
"fmt"
"time"

"github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/common"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil"
"github.com/redis/go-redis/v9"
"time"
)

const (
groupExpireTime = time.Second * 60 * 60 * 12
)

type GroupHash interface {
GetGroupHash(ctx context.Context, groupID string) (uint64, error)
}

type GroupCache interface {
metaCache
NewCache() GroupCache
GetGroupsInfo(ctx context.Context, groupIDs []string) (groups []*relationtb.GroupModel, err error)
GetGroupInfo(ctx context.Context, groupID string) (group *relationtb.GroupModel, err error)
DelGroupsInfo(groupIDs ...string) GroupCache

GetGroupMembersHash(ctx context.Context, groupID string) (hashCode uint64, err error)
GetGroupMemberHashMap(ctx context.Context, groupIDs []string) (map[string]*relationtb.GroupSimpleUserID, error)
DelGroupMembersHash(groupID string) GroupCache

GetGroupMemberIDs(ctx context.Context, groupID string) (groupMemberIDs []string, err error)
GetGroupsMemberIDs(ctx context.Context, groupIDs []string) (groupMemberIDs map[string][]string, err error)

DelGroupMemberIDs(groupID string) GroupCache

GetJoinedGroupIDs(ctx context.Context, userID string) (joinedGroupIDs []string, err error)
DelJoinedGroupID(userID ...string) GroupCache

GetGroupMemberInfo(ctx context.Context, groupID, userID string) (groupMember *relationtb.GroupMemberModel, err error)
GetGroupMembersInfo(ctx context.Context, groupID string, userID []string) (groupMembers []*relationtb.GroupMemberModel, err error)
GetAllGroupMembersInfo(ctx context.Context, groupID string) (groupMembers []*relationtb.GroupMemberModel, err error)
GetGroupMembersPage(ctx context.Context, groupID string, userID []string, showNumber, pageNumber int32) (total uint32, groupMembers []*relationtb.GroupMemberModel, err error)
FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) ([]*relationtb.GroupMemberModel, error)

GetGroupRoleLevelMemberIDs(ctx context.Context, groupID string, roleLevel int32) ([]string, error)
GetGroupOwner(ctx context.Context, groupID string) (*relationtb.GroupMemberModel, error)
GetGroupsOwner(ctx context.Context, groupIDs []string) ([]*relationtb.GroupMemberModel, error)
DelGroupRoleLevel(groupID string, roleLevel []int32) GroupCache
DelGroupAllRoleLevel(groupID string) GroupCache
DelGroupMembersInfo(groupID string, userID ...string) GroupCache
GetGroupRoleLevelMemberInfo(ctx context.Context, groupID string, roleLevel int32) ([]*relationtb.GroupMemberModel, error)
GetGroupRolesLevelMemberInfo(ctx context.Context, groupID string, roleLevels []int32) ([]*relationtb.GroupMemberModel, error)
GetGroupMemberNum(ctx context.Context, groupID string) (memberNum int64, err error)
DelGroupsMemberNum(groupID ...string) GroupCache
}
var errIndex = errs.New("err index")

type GroupCacheRedis struct {
metaCache
groupDB relationtb.GroupModelInterface
groupMemberDB relationtb.GroupMemberModelInterface
groupRequestDB relationtb.GroupRequestModelInterface
cache.BatchDeleter
groupDB database.Group
groupMemberDB database.GroupMember
groupRequestDB database.GroupRequest
expireTime time.Duration
rcClient *rockscache.Client
groupHash GroupHash
groupHash cache.GroupHash
}

func NewGroupCacheRedis(
rdb redis.UniversalClient,
localCache *config.LocalCache,
groupDB relationtb.GroupModelInterface,
groupMemberDB relationtb.GroupMemberModelInterface,
groupRequestDB relationtb.GroupRequestModelInterface,
hashCode GroupHash,
opts rockscache.Options,
) GroupCache {
rcClient := rockscache.NewClient(rdb, opts)
mc := NewMetaCacheRedis(rcClient)
groupDB database.Group,
groupMemberDB database.GroupMember,
groupRequestDB database.GroupRequest,
hashCode cache.GroupHash,
opts *rockscache.Options,
) cache.GroupCache {
batchHandler := NewBatchDeleterRedis(rdb, opts, []string{localCache.Group.Topic})
g := localCache.Group
mc.SetTopic(g.Topic)
log.ZDebug(context.Background(), "group local cache init", "Topic", g.Topic, "SlotNum", g.SlotNum, "SlotSize", g.SlotSize, "enable", g.Enable())
mc.SetRawRedisClient(rdb)

return &GroupCacheRedis{
rcClient: rcClient, expireTime: groupExpireTime,
groupDB: groupDB, groupMemberDB: groupMemberDB, groupRequestDB: groupRequestDB,
groupHash: hashCode,
metaCache: mc,
BatchDeleter: batchHandler,
rcClient: rockscache.NewClient(rdb, *opts),
expireTime: groupExpireTime,
groupDB: groupDB,
groupMemberDB: groupMemberDB,
groupRequestDB: groupRequestDB,
groupHash: hashCode,
}
}

func (g *GroupCacheRedis) NewCache() GroupCache {
func (g *GroupCacheRedis) CloneGroupCache() cache.GroupCache {
return &GroupCacheRedis{
BatchDeleter: g.BatchDeleter.Clone(),
rcClient: g.rcClient,
expireTime: g.expireTime,
groupDB: g.groupDB,
groupMemberDB: g.groupMemberDB,
groupRequestDB: g.groupRequestDB,
metaCache: g.Copy(),
}
}

@ -147,7 +111,7 @@ func (g *GroupCacheRedis) getGroupRoleLevelMemberIDsKey(groupID string, roleLeve
return cachekey.GetGroupRoleLevelMemberIDsKey(groupID, roleLevel)
}

func (g *GroupCacheRedis) GetGroupIndex(group *relationtb.GroupModel, keys []string) (int, error) {
func (g *GroupCacheRedis) GetGroupIndex(group *model.Group, keys []string) (int, error) {
key := g.getGroupInfoKey(group.GroupID)
for i, _key := range keys {
if _key == key {
@ -158,7 +122,7 @@ func (g *GroupCacheRedis) GetGroupIndex(group *relationtb.GroupModel, keys []str
return 0, errIndex
}

func (g *GroupCacheRedis) GetGroupMemberIndex(groupMember *relationtb.GroupMemberModel, keys []string) (int, error) {
func (g *GroupCacheRedis) GetGroupMemberIndex(groupMember *model.GroupMember, keys []string) (int, error) {
key := g.getGroupMemberInfoKey(groupMember.GroupID, groupMember.UserID)
for i, _key := range keys {
if _key == key {
@ -169,22 +133,22 @@ func (g *GroupCacheRedis) GetGroupMemberIndex(groupMember *relationtb.GroupMembe
return 0, errIndex
}

func (g *GroupCacheRedis) GetGroupsInfo(ctx context.Context, groupIDs []string) (groups []*relationtb.GroupModel, err error) {
return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
func (g *GroupCacheRedis) GetGroupsInfo(ctx context.Context, groupIDs []string) (groups []*model.Group, err error) {
return batchGetCache(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
return g.getGroupInfoKey(groupID)
}, func(ctx context.Context, groupID string) (*relationtb.GroupModel, error) {
}, func(ctx context.Context, groupID string) (*model.Group, error) {
return g.groupDB.Take(ctx, groupID)
})
}

func (g *GroupCacheRedis) GetGroupInfo(ctx context.Context, groupID string) (group *relationtb.GroupModel, err error) {
return getCache(ctx, g.rcClient, g.getGroupInfoKey(groupID), g.expireTime, func(ctx context.Context) (*relationtb.GroupModel, error) {
func (g *GroupCacheRedis) GetGroupInfo(ctx context.Context, groupID string) (group *model.Group, err error) {
return getCache(ctx, g.rcClient, g.getGroupInfoKey(groupID), g.expireTime, func(ctx context.Context) (*model.Group, error) {
return g.groupDB.Take(ctx, groupID)
})
}

func (g *GroupCacheRedis) DelGroupsInfo(groupIDs ...string) GroupCache {
newGroupCache := g.NewCache()
func (g *GroupCacheRedis) DelGroupsInfo(groupIDs ...string) cache.GroupCache {
newGroupCache := g.CloneGroupCache()
keys := make([]string, 0, len(groupIDs))
for _, groupID := range groupIDs {
keys = append(keys, g.getGroupInfoKey(groupID))
@ -194,8 +158,8 @@ func (g *GroupCacheRedis) DelGroupsInfo(groupIDs ...string) GroupCache {
return newGroupCache
}

func (g *GroupCacheRedis) DelGroupsOwner(groupIDs ...string) GroupCache {
newGroupCache := g.NewCache()
func (g *GroupCacheRedis) DelGroupsOwner(groupIDs ...string) cache.GroupCache {
newGroupCache := g.CloneGroupCache()
keys := make([]string, 0, len(groupIDs))
for _, groupID := range groupIDs {
keys = append(keys, g.getGroupRoleLevelMemberIDsKey(groupID, constant.GroupOwner))
@ -205,8 +169,8 @@ func (g *GroupCacheRedis) DelGroupsOwner(groupIDs ...string) GroupCache {
return newGroupCache
}

func (g *GroupCacheRedis) DelGroupRoleLevel(groupID string, roleLevels []int32) GroupCache {
newGroupCache := g.NewCache()
func (g *GroupCacheRedis) DelGroupRoleLevel(groupID string, roleLevels []int32) cache.GroupCache {
newGroupCache := g.CloneGroupCache()
keys := make([]string, 0, len(roleLevels))
for _, roleLevel := range roleLevels {
keys = append(keys, g.getGroupRoleLevelMemberIDsKey(groupID, roleLevel))
@ -215,7 +179,7 @@ func (g *GroupCacheRedis) DelGroupRoleLevel(groupID string, roleLevels []int32)
return newGroupCache
}

func (g *GroupCacheRedis) DelGroupAllRoleLevel(groupID string) GroupCache {
func (g *GroupCacheRedis) DelGroupAllRoleLevel(groupID string) cache.GroupCache {
return g.DelGroupRoleLevel(groupID, []int32{constant.GroupOwner, constant.GroupAdmin, constant.GroupOrdinaryUsers})
}

@ -228,11 +192,11 @@ func (g *GroupCacheRedis) GetGroupMembersHash(ctx context.Context, groupID strin
})
}

func (g *GroupCacheRedis) GetGroupMemberHashMap(ctx context.Context, groupIDs []string) (map[string]*relationtb.GroupSimpleUserID, error) {
func (g *GroupCacheRedis) GetGroupMemberHashMap(ctx context.Context, groupIDs []string) (map[string]*common.GroupSimpleUserID, error) {
if g.groupHash == nil {
return nil, errs.ErrInternalServer.WrapMsg("group hash is nil")
}
res := make(map[string]*relationtb.GroupSimpleUserID)
res := make(map[string]*common.GroupSimpleUserID)
for _, groupID := range groupIDs {
hash, err := g.GetGroupMembersHash(ctx, groupID)
if err != nil {
@ -243,14 +207,14 @@ func (g *GroupCacheRedis) GetGroupMemberHashMap(ctx context.Context, groupIDs []
if err != nil {
return nil, err
}
res[groupID] = &relationtb.GroupSimpleUserID{Hash: hash, MemberNum: uint32(num)}
res[groupID] = &common.GroupSimpleUserID{Hash: hash, MemberNum: uint32(num)}
}

return res, nil
}

func (g *GroupCacheRedis) DelGroupMembersHash(groupID string) GroupCache {
cache := g.NewCache()
func (g *GroupCacheRedis) DelGroupMembersHash(groupID string) cache.GroupCache {
cache := g.CloneGroupCache()
cache.AddKeys(g.getGroupMembersHashKey(groupID))

return cache
@ -275,8 +239,8 @@ func (g *GroupCacheRedis) GetGroupsMemberIDs(ctx context.Context, groupIDs []str
return m, nil
}

func (g *GroupCacheRedis) DelGroupMemberIDs(groupID string) GroupCache {
cache := g.NewCache()
func (g *GroupCacheRedis) DelGroupMemberIDs(groupID string) cache.GroupCache {
cache := g.CloneGroupCache()
cache.AddKeys(g.getGroupMemberIDsKey(groupID))

return cache
@ -288,27 +252,27 @@ func (g *GroupCacheRedis) GetJoinedGroupIDs(ctx context.Context, userID string)
})
}

func (g *GroupCacheRedis) DelJoinedGroupID(userIDs ...string) GroupCache {
func (g *GroupCacheRedis) DelJoinedGroupID(userIDs ...string) cache.GroupCache {
keys := make([]string, 0, len(userIDs))
for _, userID := range userIDs {
keys = append(keys, g.getJoinedGroupsKey(userID))
}
cache := g.NewCache()
cache := g.CloneGroupCache()
cache.AddKeys(keys...)

return cache
}

func (g *GroupCacheRedis) GetGroupMemberInfo(ctx context.Context, groupID, userID string) (groupMember *relationtb.GroupMemberModel, err error) {
return getCache(ctx, g.rcClient, g.getGroupMemberInfoKey(groupID, userID), g.expireTime, func(ctx context.Context) (*relationtb.GroupMemberModel, error) {
func (g *GroupCacheRedis) GetGroupMemberInfo(ctx context.Context, groupID, userID string) (groupMember *model.GroupMember, err error) {
return getCache(ctx, g.rcClient, g.getGroupMemberInfoKey(groupID, userID), g.expireTime, func(ctx context.Context) (*model.GroupMember, error) {
return g.groupMemberDB.Take(ctx, groupID, userID)
})
}

func (g *GroupCacheRedis) GetGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]*relationtb.GroupMemberModel, error) {
return batchGetCache2(ctx, g.rcClient, g.expireTime, userIDs, func(userID string) string {
func (g *GroupCacheRedis) GetGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupMember, error) {
return batchGetCache(ctx, g.rcClient, g.expireTime, userIDs, func(userID string) string {
return g.getGroupMemberInfoKey(groupID, userID)
}, func(ctx context.Context, userID string) (*relationtb.GroupMemberModel, error) {
}, func(ctx context.Context, userID string) (*model.GroupMember, error) {
return g.groupMemberDB.Take(ctx, groupID, userID)
})
}
@ -318,7 +282,7 @@ func (g *GroupCacheRedis) GetGroupMembersPage(
groupID string,
userIDs []string,
showNumber, pageNumber int32,
) (total uint32, groupMembers []*relationtb.GroupMemberModel, err error) {
) (total uint32, groupMembers []*model.GroupMember, err error) {
groupMemberIDs, err := g.GetGroupMemberIDs(ctx, groupID)
if err != nil {
return 0, nil, err
@ -333,7 +297,7 @@ func (g *GroupCacheRedis) GetGroupMembersPage(
return uint32(len(userIDs)), groupMembers, err
}

func (g *GroupCacheRedis) GetAllGroupMembersInfo(ctx context.Context, groupID string) (groupMembers []*relationtb.GroupMemberModel, err error) {
func (g *GroupCacheRedis) GetAllGroupMembersInfo(ctx context.Context, groupID string) (groupMembers []*model.GroupMember, err error) {
groupMemberIDs, err := g.GetGroupMemberIDs(ctx, groupID)
if err != nil {
return nil, err
@ -342,7 +306,7 @@ func (g *GroupCacheRedis) GetAllGroupMembersInfo(ctx context.Context, groupID st
return g.GetGroupMembersInfo(ctx, groupID, groupMemberIDs)
}

func (g *GroupCacheRedis) GetAllGroupMemberInfo(ctx context.Context, groupID string) ([]*relationtb.GroupMemberModel, error) {
func (g *GroupCacheRedis) GetAllGroupMemberInfo(ctx context.Context, groupID string) ([]*model.GroupMember, error) {
groupMemberIDs, err := g.GetGroupMemberIDs(ctx, groupID)
if err != nil {
return nil, err
@ -350,12 +314,12 @@ func (g *GroupCacheRedis) GetAllGroupMemberInfo(ctx context.Context, groupID str
return g.GetGroupMembersInfo(ctx, groupID, groupMemberIDs)
}

func (g *GroupCacheRedis) DelGroupMembersInfo(groupID string, userIDs ...string) GroupCache {
func (g *GroupCacheRedis) DelGroupMembersInfo(groupID string, userIDs ...string) cache.GroupCache {
keys := make([]string, 0, len(userIDs))
for _, userID := range userIDs {
keys = append(keys, g.getGroupMemberInfoKey(groupID, userID))
}
cache := g.NewCache()
cache := g.CloneGroupCache()
cache.AddKeys(keys...)

return cache
@ -367,18 +331,18 @@ func (g *GroupCacheRedis) GetGroupMemberNum(ctx context.Context, groupID string)
})
}

func (g *GroupCacheRedis) DelGroupsMemberNum(groupID ...string) GroupCache {
func (g *GroupCacheRedis) DelGroupsMemberNum(groupID ...string) cache.GroupCache {
keys := make([]string, 0, len(groupID))
for _, groupID := range groupID {
keys = append(keys, g.getGroupMemberNumKey(groupID))
}
cache := g.NewCache()
cache := g.CloneGroupCache()
cache.AddKeys(keys...)

return cache
}

func (g *GroupCacheRedis) GetGroupOwner(ctx context.Context, groupID string) (*relationtb.GroupMemberModel, error) {
func (g *GroupCacheRedis) GetGroupOwner(ctx context.Context, groupID string) (*model.GroupMember, error) {
members, err := g.GetGroupRoleLevelMemberInfo(ctx, groupID, constant.GroupOwner)
if err != nil {
return nil, err
@ -389,8 +353,8 @@ func (g *GroupCacheRedis) GetGroupOwner(ctx context.Context, groupID string) (*r
return members[0], nil
}

func (g *GroupCacheRedis) GetGroupsOwner(ctx context.Context, groupIDs []string) ([]*relationtb.GroupMemberModel, error) {
members := make([]*relationtb.GroupMemberModel, 0, len(groupIDs))
func (g *GroupCacheRedis) GetGroupsOwner(ctx context.Context, groupIDs []string) ([]*model.GroupMember, error) {
members := make([]*model.GroupMember, 0, len(groupIDs))
for _, groupID := range groupIDs {
items, err := g.GetGroupRoleLevelMemberInfo(ctx, groupID, constant.GroupOwner)
if err != nil {
@ -409,7 +373,7 @@ func (g *GroupCacheRedis) GetGroupRoleLevelMemberIDs(ctx context.Context, groupI
})
}

func (g *GroupCacheRedis) GetGroupRoleLevelMemberInfo(ctx context.Context, groupID string, roleLevel int32) ([]*relationtb.GroupMemberModel, error) {
func (g *GroupCacheRedis) GetGroupRoleLevelMemberInfo(ctx context.Context, groupID string, roleLevel int32) ([]*model.GroupMember, error) {
userIDs, err := g.GetGroupRoleLevelMemberIDs(ctx, groupID, roleLevel)
if err != nil {
return nil, err
@ -417,7 +381,7 @@ func (g *GroupCacheRedis) GetGroupRoleLevelMemberInfo(ctx context.Context, group
return g.GetGroupMembersInfo(ctx, groupID, userIDs)
}

func (g *GroupCacheRedis) GetGroupRolesLevelMemberInfo(ctx context.Context, groupID string, roleLevels []int32) ([]*relationtb.GroupMemberModel, error) {
func (g *GroupCacheRedis) GetGroupRolesLevelMemberInfo(ctx context.Context, groupID string, roleLevels []int32) ([]*model.GroupMember, error) {
var userIDs []string
for _, roleLevel := range roleLevels {
ids, err := g.GetGroupRoleLevelMemberIDs(ctx, groupID, roleLevel)
@ -429,16 +393,16 @@ func (g *GroupCacheRedis) GetGroupRolesLevelMemberInfo(ctx context.Context, grou
return g.GetGroupMembersInfo(ctx, groupID, userIDs)
}

func (g *GroupCacheRedis) FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) (_ []*relationtb.GroupMemberModel, err error) {
func (g *GroupCacheRedis) FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) (_ []*model.GroupMember, err error) {
if len(groupIDs) == 0 {
groupIDs, err = g.GetJoinedGroupIDs(ctx, userID)
if err != nil {
return nil, err
}
}
return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
return batchGetCache(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
return g.getGroupMemberInfoKey(groupID, userID)
}, func(ctx context.Context, groupID string) (*relationtb.GroupMemberModel, error) {
}, func(ctx context.Context, groupID string) (*model.GroupMember, error) {
return g.groupMemberDB.Take(ctx, groupID, userID)
})
}
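Caller-side note (illustrative, not part of this diff): because batchGetCache skips entries whose loader reports ErrRecordNotFound, GetGroupsInfo can legitimately return fewer groups than requested, so callers should index by GroupID rather than by position. The sketch assumes cache.GroupCache keeps the GetGroupsInfo method shown in the old interface above.

func exampleLoadGroups(ctx context.Context, gc cache.GroupCache, groupIDs []string) (map[string]*model.Group, error) {
    groups, err := gc.GetGroupsInfo(ctx, groupIDs)
    if err != nil {
        return nil, err
    }
    // Missing groups are silently skipped, so build a map keyed by GroupID.
    res := make(map[string]*model.Group, len(groups))
    for _, g := range groups {
        res[g.GroupID] = g
    }
    return res, nil
}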
125 pkg/common/storage/cache/redis/lua_script.go vendored Normal file
@ -0,0 +1,125 @@
package redis

import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9"
)

var (
setBatchWithCommonExpireScript = redis.NewScript(`
local expire = tonumber(ARGV[1])
for i, key in ipairs(KEYS) do
redis.call('SET', key, ARGV[i + 1])
redis.call('EXPIRE', key, expire)
end
return #KEYS
`)

setBatchWithIndividualExpireScript = redis.NewScript(`
local n = #KEYS
for i = 1, n do
redis.call('SET', KEYS[i], ARGV[i])
redis.call('EXPIRE', KEYS[i], ARGV[i + n])
end
return n
`)

deleteBatchScript = redis.NewScript(`
for i, key in ipairs(KEYS) do
redis.call('DEL', key)
end
return #KEYS
`)

getBatchScript = redis.NewScript(`
local values = {}
for i, key in ipairs(KEYS) do
local value = redis.call('GET', key)
table.insert(values, value)
end
return values
`)
)

func callLua(ctx context.Context, rdb redis.Scripter, script *redis.Script, keys []string, args []any) (any, error) {
log.ZDebug(ctx, "callLua args", "scriptHash", script.Hash(), "keys", keys, "args", args)
r := script.EvalSha(ctx, rdb, keys, args)
if redis.HasErrorPrefix(r.Err(), "NOSCRIPT") {
if err := script.Load(ctx, rdb).Err(); err != nil {
r = script.Eval(ctx, rdb, keys, args)
} else {
r = script.EvalSha(ctx, rdb, keys, args)
}
}
v, err := r.Result()
if err == redis.Nil {
err = nil
}
return v, errs.WrapMsg(err, "call lua err", "scriptHash", script.Hash(), "keys", keys, "args", args)
}

func LuaSetBatchWithCommonExpire(ctx context.Context, rdb redis.Scripter, keys []string, values []string, expire int) error {
// Check if the lengths of keys and values match
if len(keys) != len(values) {
return errs.New("keys and values length mismatch").Wrap()
}

// Ensure allocation size does not overflow
maxAllowedLen := (1 << 31) - 1 // 2GB limit (maximum address space for 32-bit systems)

if len(values) > maxAllowedLen-1 {
return fmt.Errorf("values length is too large, causing overflow")
}
var vals = make([]any, 0, 1+len(values))
vals = append(vals, expire)
for _, v := range values {
vals = append(vals, v)
}
_, err := callLua(ctx, rdb, setBatchWithCommonExpireScript, keys, vals)
return err
}

func LuaSetBatchWithIndividualExpire(ctx context.Context, rdb redis.Scripter, keys []string, values []string, expires []int) error {
// Check if the lengths of keys, values, and expires match
if len(keys) != len(values) || len(keys) != len(expires) {
return errs.New("keys and values length mismatch").Wrap()
}

// Ensure the allocation size does not overflow
maxAllowedLen := (1 << 31) - 1 // 2GB limit (maximum address space for 32-bit systems)

if len(values) > maxAllowedLen-1 {
return errs.New(fmt.Sprintf("values length %d exceeds the maximum allowed length %d", len(values), maxAllowedLen-1)).Wrap()
}
var vals = make([]any, 0, len(values)+len(expires))
for _, v := range values {
vals = append(vals, v)
}
for _, ex := range expires {
vals = append(vals, ex)
}
_, err := callLua(ctx, rdb, setBatchWithIndividualExpireScript, keys, vals)
return err
}

func LuaDeleteBatch(ctx context.Context, rdb redis.Scripter, keys []string) error {
_, err := callLua(ctx, rdb, deleteBatchScript, keys, nil)
return err
}

func LuaGetBatch(ctx context.Context, rdb redis.Scripter, keys []string) ([]any, error) {
v, err := callLua(ctx, rdb, getBatchScript, keys, nil)
if err != nil {
return nil, err
}
values, ok := v.([]any)
if !ok {
return nil, servererrs.ErrArgs.WrapMsg("invalid lua get batch result")
}
return values, nil

}
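Minimal usage sketch for the Lua batch helpers above (not part of this commit); the keys, values, and 60-second TTL are illustrative only.

func exampleLuaBatch(ctx context.Context, rdb redis.Scripter) ([]any, error) {
    keys := []string{"example:k1", "example:k2"}
    // Write both keys in a single script call with a shared expiration.
    if err := LuaSetBatchWithCommonExpire(ctx, rdb, keys, []string{"v1", "v2"}, 60); err != nil {
        return nil, err
    }
    // Results come back in key order; keys that expired or never existed are nil.
    return LuaGetBatch(ctx, rdb, keys)
}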
75 pkg/common/storage/cache/redis/lua_script_test.go vendored Normal file
@ -0,0 +1,75 @@
package redis

import (
"context"
"github.com/go-redis/redismock/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"testing"
)

func TestLuaSetBatchWithCommonExpire(t *testing.T) {
rdb, mock := redismock.NewClientMock()
ctx := context.Background()

keys := []string{"key1", "key2"}
values := []string{"value1", "value2"}
expire := 10

mock.ExpectEvalSha(setBatchWithCommonExpireScript.Hash(), keys, []any{expire, "value1", "value2"}).SetVal(int64(len(keys)))

err := LuaSetBatchWithCommonExpire(ctx, rdb, keys, values, expire)
require.NoError(t, err)
assert.NoError(t, mock.ExpectationsWereMet())
}

func TestLuaSetBatchWithIndividualExpire(t *testing.T) {
rdb, mock := redismock.NewClientMock()
ctx := context.Background()

keys := []string{"key1", "key2"}
values := []string{"value1", "value2"}
expires := []int{10, 20}

args := make([]any, 0, len(values)+len(expires))
for _, v := range values {
args = append(args, v)
}
for _, ex := range expires {
args = append(args, ex)
}

mock.ExpectEvalSha(setBatchWithIndividualExpireScript.Hash(), keys, args).SetVal(int64(len(keys)))

err := LuaSetBatchWithIndividualExpire(ctx, rdb, keys, values, expires)
require.NoError(t, err)
assert.NoError(t, mock.ExpectationsWereMet())
}

func TestLuaDeleteBatch(t *testing.T) {
rdb, mock := redismock.NewClientMock()
ctx := context.Background()

keys := []string{"key1", "key2"}

mock.ExpectEvalSha(deleteBatchScript.Hash(), keys, []any{}).SetVal(int64(len(keys)))

err := LuaDeleteBatch(ctx, rdb, keys)
require.NoError(t, err)
assert.NoError(t, mock.ExpectationsWereMet())
}

func TestLuaGetBatch(t *testing.T) {
rdb, mock := redismock.NewClientMock()
ctx := context.Background()

keys := []string{"key1", "key2"}
expectedValues := []any{"value1", "value2"}

mock.ExpectEvalSha(getBatchScript.Hash(), keys, []any{}).SetVal(expectedValues)

values, err := LuaGetBatch(ctx, rdb, keys)
require.NoError(t, err)
assert.NoError(t, mock.ExpectationsWereMet())
assert.Equal(t, expectedValues, values)
}
187
pkg/common/storage/cache/redis/msg.go
vendored
Normal file
187
pkg/common/storage/cache/redis/msg.go
vendored
Normal file
@ -0,0 +1,187 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
|
||||
"github.com/openimsdk/protocol/sdkws"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"time"
|
||||
) //
|
||||
|
||||
// msgCacheTimeout is expiration time of message cache, 86400 seconds
|
||||
const msgCacheTimeout = 86400
|
||||
|
||||
func NewMsgCache(client redis.UniversalClient) cache.MsgCache {
|
||||
return &msgCache{rdb: client}
|
||||
}
|
||||
|
||||
type msgCache struct {
|
||||
rdb redis.UniversalClient
|
||||
}
|
||||
|
||||
func (c *msgCache) getMessageCacheKey(conversationID string, seq int64) string {
|
||||
return cachekey.GetMessageCacheKey(conversationID, seq)
|
||||
}
|
||||
func (c *msgCache) getMessageDelUserListKey(conversationID string, seq int64) string {
|
||||
return cachekey.GetMessageDelUserListKey(conversationID, seq)
|
||||
}
|
||||
|
||||
func (c *msgCache) getUserDelList(conversationID, userID string) string {
|
||||
return cachekey.GetUserDelListKey(conversationID, userID)
|
||||
}
|
||||
|
||||
func (c *msgCache) getSendMsgKey(id string) string {
|
||||
return cachekey.GetSendMsgKey(id)
|
||||
}
|
||||
|
||||
func (c *msgCache) getLockMessageTypeKey(clientMsgID string, TypeKey string) string {
|
||||
return cachekey.GetLockMessageTypeKey(clientMsgID, TypeKey)
|
||||
}
|
||||
|
||||
func (c *msgCache) getMessageReactionExPrefix(clientMsgID string, sessionType int32) string {
    return cachekey.GetMessageReactionExKey(clientMsgID, sessionType)
}

func (c *msgCache) SetMessagesToCache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (int, error) {
    msgMap := datautil.SliceToMap(msgs, func(msg *sdkws.MsgData) string {
        return c.getMessageCacheKey(conversationID, msg.Seq)
    })
    keys := datautil.Slice(msgs, func(msg *sdkws.MsgData) string {
        return c.getMessageCacheKey(conversationID, msg.Seq)
    })
    err := ProcessKeysBySlot(ctx, c.rdb, keys, func(ctx context.Context, slot int64, keys []string) error {
        var values []string
        for _, key := range keys {
            if msg, ok := msgMap[key]; ok {
                s, err := msgprocessor.Pb2String(msg)
                if err != nil {
                    return err
                }
                values = append(values, s)
            }
        }
        return LuaSetBatchWithCommonExpire(ctx, c.rdb, keys, values, msgCacheTimeout)
    })
    if err != nil {
        return 0, err
    }
    return len(msgs), nil
}

func (c *msgCache) DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error {
    var keys []string
    for _, seq := range seqs {
        keys = append(keys, c.getMessageCacheKey(conversationID, seq))
    }
    return ProcessKeysBySlot(ctx, c.rdb, keys, func(ctx context.Context, slot int64, keys []string) error {
        return LuaDeleteBatch(ctx, c.rdb, keys)
    })
}

func (c *msgCache) SetSendMsgStatus(ctx context.Context, id string, status int32) error {
    return errs.Wrap(c.rdb.Set(ctx, c.getSendMsgKey(id), status, time.Hour*24).Err())
}

func (c *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, error) {
    result, err := c.rdb.Get(ctx, c.getSendMsgKey(id)).Int()
    return int32(result), errs.Wrap(err)
}

func (c *msgCache) LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error {
    key := c.getLockMessageTypeKey(clientMsgID, TypeKey)
    return errs.Wrap(c.rdb.SetNX(ctx, key, 1, time.Minute).Err())
}

func (c *msgCache) UnLockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error {
    key := c.getLockMessageTypeKey(clientMsgID, TypeKey)
    return errs.Wrap(c.rdb.Del(ctx, key).Err())
}

func (c *msgCache) JudgeMessageReactionExist(ctx context.Context, clientMsgID string, sessionType int32) (bool, error) {
    n, err := c.rdb.Exists(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType)).Result()
    if err != nil {
        return false, errs.Wrap(err)
    }
    return n > 0, nil
}

func (c *msgCache) SetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey, value string) error {
    return errs.Wrap(c.rdb.HSet(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), typeKey, value).Err())
}

func (c *msgCache) SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error) {
    val, err := c.rdb.Expire(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), expiration).Result()
    return val, errs.Wrap(err)
}

func (c *msgCache) GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error) {
    val, err := c.rdb.HGet(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), typeKey).Result()
    return val, errs.Wrap(err)
}

func (c *msgCache) GetOneMessageAllReactionList(ctx context.Context, clientMsgID string, sessionType int32) (map[string]string, error) {
    val, err := c.rdb.HGetAll(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType)).Result()
    return val, errs.Wrap(err)
}

func (c *msgCache) DeleteOneMessageKey(ctx context.Context, clientMsgID string, sessionType int32, subKey string) error {
    return errs.Wrap(c.rdb.HDel(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), subKey).Err())
}

func (c *msgCache) GetMessagesBySeq(ctx context.Context, conversationID string, seqs []int64) (seqMsgs []*sdkws.MsgData, failedSeqs []int64, err error) {
    var keys []string
    keySeqMap := make(map[string]int64, 10)
    for _, seq := range seqs {
        key := c.getMessageCacheKey(conversationID, seq)
        keys = append(keys, key)
        keySeqMap[key] = seq
    }
    err = ProcessKeysBySlot(ctx, c.rdb, keys, func(ctx context.Context, slot int64, keys []string) error {
        result, err := LuaGetBatch(ctx, c.rdb, keys)
        if err != nil {
            return err
        }
        for i, value := range result {
            seq := keySeqMap[keys[i]]
            if value == nil {
                failedSeqs = append(failedSeqs, seq)
                continue
            }
            msg := &sdkws.MsgData{}
            msgString, ok := value.(string)
            if !ok || msgprocessor.String2Pb(msgString, msg) != nil {
                failedSeqs = append(failedSeqs, seq)
                continue
            }
            seqMsgs = append(seqMsgs, msg)
        }
        return nil
    })
    if err != nil {
        return nil, nil, err
    }
    return seqMsgs, failedSeqs, nil
}
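A minimal usage sketch (illustrative only, not part of this diff): a caller inside this package would normally read from the cache first and fall back to persistent storage for the sequences that missed. The fetchFromDB callback is a hypothetical stand-in for whatever message store backs the cache.

// Sketch: read-through helper built on the msgCache batch APIs above (fetchFromDB is assumed).
func getMsgsWithFallback(ctx context.Context, c *msgCache, conversationID string, seqs []int64,
    fetchFromDB func(ctx context.Context, conversationID string, seqs []int64) ([]*sdkws.MsgData, error),
) ([]*sdkws.MsgData, error) {
    cached, failedSeqs, err := c.GetMessagesBySeq(ctx, conversationID, seqs)
    if err != nil {
        return nil, err
    }
    if len(failedSeqs) == 0 {
        return cached, nil
    }
    // Load the misses from storage and refill the cache so the next read hits.
    missed, err := fetchFromDB(ctx, conversationID, failedSeqs)
    if err != nil {
        return nil, err
    }
    if _, err := c.SetMessagesToCache(ctx, conversationID, missed); err != nil {
        return nil, err
    }
    return append(cached, missed...), nil
}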
133  pkg/common/storage/cache/redis/msg_test.go  vendored  Normal file
@ -0,0 +1,133 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package redis

import (
    "context"
    "fmt"
    "github.com/openimsdk/protocol/sdkws"
    "github.com/redis/go-redis/v9"
    "github.com/stretchr/testify/assert"
    "google.golang.org/protobuf/proto"
    "testing"
)

func Test_msgCache_SetMessagesToCache(t *testing.T) {
    type fields struct {
        rdb redis.UniversalClient
    }
    type args struct {
        ctx            context.Context
        conversationID string
        msgs           []*sdkws.MsgData
    }
    tests := []struct {
        name    string
        fields  fields
        args    args
        want    int
        wantErr assert.ErrorAssertionFunc
    }{
        {"test1", fields{rdb: redis.NewClient(&redis.Options{Addr: "localhost:16379", Username: "", Password: "openIM123", DB: 0})}, args{context.Background(),
            "cid", []*sdkws.MsgData{{Seq: 1}, {Seq: 2}, {Seq: 3}}}, 3, assert.NoError},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            c := &msgCache{
                rdb: tt.fields.rdb,
            }
            got, err := c.SetMessagesToCache(tt.args.ctx, tt.args.conversationID, tt.args.msgs)
            if !tt.wantErr(t, err, fmt.Sprintf("SetMessagesToCache(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.msgs)) {
                return
            }
            assert.Equalf(t, tt.want, got, "SetMessagesToCache(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.msgs)
        })
    }
}

func Test_msgCache_GetMessagesBySeq(t *testing.T) {
    type fields struct {
        rdb redis.UniversalClient
    }
    type args struct {
        ctx            context.Context
        conversationID string
        seqs           []int64
    }
    var failedSeq []int64
    tests := []struct {
        name           string
        fields         fields
        args           args
        wantSeqMsgs    []*sdkws.MsgData
        wantFailedSeqs []int64
        wantErr        assert.ErrorAssertionFunc
    }{
        {"test1", fields{rdb: redis.NewClient(&redis.Options{Addr: "localhost:16379", Password: "openIM123", DB: 0})},
            args{context.Background(), "cid", []int64{1, 2, 3}},
            []*sdkws.MsgData{{Seq: 1}, {Seq: 2}, {Seq: 3}}, failedSeq, assert.NoError},
        {"test2", fields{rdb: redis.NewClient(&redis.Options{Addr: "localhost:16379", Password: "openIM123", DB: 0})},
            args{context.Background(), "cid", []int64{4, 5, 6}},
            nil, []int64{4, 5, 6}, assert.NoError},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            c := &msgCache{
                rdb: tt.fields.rdb,
            }
            gotSeqMsgs, gotFailedSeqs, err := c.GetMessagesBySeq(tt.args.ctx, tt.args.conversationID, tt.args.seqs)
            if !tt.wantErr(t, err, fmt.Sprintf("GetMessagesBySeq(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.seqs)) {
                return
            }
            equalMsgDataSlices(t, tt.wantSeqMsgs, gotSeqMsgs)
            assert.Equalf(t, tt.wantFailedSeqs, gotFailedSeqs, "GetMessagesBySeq(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.seqs)
        })
    }
}

func equalMsgDataSlices(t *testing.T, expected, actual []*sdkws.MsgData) {
    assert.Equal(t, len(expected), len(actual), "Slices have different lengths")
    for i := range expected {
        assert.True(t, proto.Equal(expected[i], actual[i]), "Element %d not equal: expected %v, got %v", i, expected[i], actual[i])
    }
}

func Test_msgCache_DeleteMessagesFromCache(t *testing.T) {
    type fields struct {
        rdb redis.UniversalClient
    }
    type args struct {
        ctx            context.Context
        conversationID string
        seqs           []int64
    }
    tests := []struct {
        name    string
        fields  fields
        args    args
        wantErr assert.ErrorAssertionFunc
    }{
        {"test1", fields{rdb: redis.NewClient(&redis.Options{Addr: "localhost:16379", Password: "openIM123"})},
            args{context.Background(), "cid", []int64{1, 2, 3}}, assert.NoError},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            c := &msgCache{
                rdb: tt.fields.rdb,
            }
            tt.wantErr(t, c.DeleteMessagesFromCache(tt.args.ctx, tt.args.conversationID, tt.args.seqs),
                fmt.Sprintf("DeleteMessagesFromCache(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.seqs))
        })
    }
}
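Note that these tests talk to a live Redis at localhost:16379 with password openIM123, so they only pass with that instance running. A hedged alternative, assuming a client mock such as github.com/go-redis/redismock/v9 is available in the module, would stub the connection for the plain-command paths; a minimal sketch (needs the extra redismock import):

// Sketch: exercising GetSendMsgStatus against a mocked client instead of a real server.
func Test_msgCache_GetSendMsgStatus_mocked(t *testing.T) {
    db, mock := redismock.NewClientMock()
    c := &msgCache{rdb: db}
    // Expect the GET issued by GetSendMsgStatus and return a stored status of 1.
    mock.ExpectGet(c.getSendMsgKey("clientMsgID-1")).SetVal("1")
    status, err := c.GetSendMsgStatus(context.Background(), "clientMsgID-1")
    assert.NoError(t, err)
    assert.Equal(t, int32(1), status)
    assert.NoError(t, mock.ExpectationsWereMet())
}

The Lua-batch paths (SetMessagesToCache, GetMessagesBySeq) still need a real Redis, since script evaluation is awkward to mock.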
197  pkg/common/storage/cache/redis/redis_shard_manager.go  vendored  Normal file
@ -0,0 +1,197 @@
package redis

import (
    "context"
    "github.com/openimsdk/tools/errs"
    "github.com/openimsdk/tools/log"
    "github.com/redis/go-redis/v9"
    "golang.org/x/sync/errgroup"
)

const (
    defaultBatchSize       = 50
    defaultConcurrentLimit = 3
)

// RedisShardManager is a class for sharding and processing keys
type RedisShardManager struct {
    redisClient redis.UniversalClient
    config      *Config
}

type Config struct {
    batchSize       int
    continueOnError bool
    concurrentLimit int
}

// Option is a function type for configuring Config
type Option func(c *Config)

// NewRedisShardManager creates a new RedisShardManager instance
func NewRedisShardManager(redisClient redis.UniversalClient, opts ...Option) *RedisShardManager {
    config := &Config{
        batchSize:       defaultBatchSize, // Default batch size is 50 keys
        continueOnError: false,
        concurrentLimit: defaultConcurrentLimit, // Default concurrent limit is 3
    }
    for _, opt := range opts {
        opt(config)
    }
    rsm := &RedisShardManager{
        redisClient: redisClient,
        config:      config,
    }
    return rsm
}

// WithBatchSize sets the number of keys to process per batch
func WithBatchSize(size int) Option {
    return func(c *Config) {
        c.batchSize = size
    }
}

// WithContinueOnError sets whether to continue processing on error
func WithContinueOnError(continueOnError bool) Option {
    return func(c *Config) {
        c.continueOnError = continueOnError
    }
}

// WithConcurrentLimit sets the concurrency limit
func WithConcurrentLimit(limit int) Option {
    return func(c *Config) {
        c.concurrentLimit = limit
    }
}

// ProcessKeysBySlot groups keys by their Redis cluster hash slots and processes them using the provided function.
func (rsm *RedisShardManager) ProcessKeysBySlot(
    ctx context.Context,
    keys []string,
    processFunc func(ctx context.Context, slot int64, keys []string) error,
) error {
    // Group keys by slot
    slots, err := groupKeysBySlot(ctx, rsm.redisClient, keys)
    if err != nil {
        return err
    }

    g, ctx := errgroup.WithContext(ctx)
    g.SetLimit(rsm.config.concurrentLimit)

    // Process keys in each slot using the provided function
    for slot, singleSlotKeys := range slots {
        batches := splitIntoBatches(singleSlotKeys, rsm.config.batchSize)
        for _, batch := range batches {
            slot, batch := slot, batch // Avoid closure capture issue
            g.Go(func() error {
                err := processFunc(ctx, slot, batch)
                if err != nil {
                    log.ZWarn(ctx, "Batch processFunc failed", err, "slot", slot, "keys", batch)
                    if !rsm.config.continueOnError {
                        return err
                    }
                }
                return nil
            })
        }
    }

    if err := g.Wait(); err != nil {
        return err
    }
    return nil
}

// groupKeysBySlot groups keys by their Redis cluster hash slots.
func groupKeysBySlot(ctx context.Context, redisClient redis.UniversalClient, keys []string) (map[int64][]string, error) {
    slots := make(map[int64][]string)
    clusterClient, isCluster := redisClient.(*redis.ClusterClient)
    if isCluster {
        pipe := clusterClient.Pipeline()
        cmds := make([]*redis.IntCmd, len(keys))
        for i, key := range keys {
            cmds[i] = pipe.ClusterKeySlot(ctx, key)
        }
        _, err := pipe.Exec(ctx)
        if err != nil {
            return nil, errs.WrapMsg(err, "get slot err")
        }

        for i, cmd := range cmds {
            slot, err := cmd.Result()
            if err != nil {
                log.ZWarn(ctx, "some key get slot err", err, "key", keys[i])
                return nil, errs.WrapMsg(err, "get slot err", "key", keys[i])
            }
            slots[slot] = append(slots[slot], keys[i])
        }
    } else {
        // If not a cluster client, put all keys in the same slot (0)
        slots[0] = keys
    }

    return slots, nil
}

// splitIntoBatches splits keys into batches of the specified size
func splitIntoBatches(keys []string, batchSize int) [][]string {
    var batches [][]string
    for batchSize < len(keys) {
        keys, batches = keys[batchSize:], append(batches, keys[0:batchSize:batchSize])
    }
    return append(batches, keys)
}

// ProcessKeysBySlot groups keys by their Redis cluster hash slots and processes them using the provided function.
func ProcessKeysBySlot(
    ctx context.Context,
    redisClient redis.UniversalClient,
    keys []string,
    processFunc func(ctx context.Context, slot int64, keys []string) error,
    opts ...Option,
) error {
    config := &Config{
        batchSize:       defaultBatchSize,
        continueOnError: false,
        concurrentLimit: defaultConcurrentLimit,
    }
    for _, opt := range opts {
        opt(config)
    }

    // Group keys by slot
    slots, err := groupKeysBySlot(ctx, redisClient, keys)
    if err != nil {
        return err
    }

    g, ctx := errgroup.WithContext(ctx)
    g.SetLimit(config.concurrentLimit)

    // Process keys in each slot using the provided function
    for slot, singleSlotKeys := range slots {
        batches := splitIntoBatches(singleSlotKeys, config.batchSize)
        for _, batch := range batches {
            slot, batch := slot, batch // Avoid closure capture issue
            g.Go(func() error {
                err := processFunc(ctx, slot, batch)
                if err != nil {
                    log.ZWarn(ctx, "Batch processFunc failed", err, "slot", slot, "keys", batch)
                    if !config.continueOnError {
                        return err
                    }
                }
                return nil
            })
        }
    }

    if err := g.Wait(); err != nil {
        return err
    }
    return nil
}
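A usage sketch (illustrative only, not part of this diff): grouping keys per hash slot keeps multi-key pipeline and Lua operations legal against a Redis cluster (no CROSSSLOT errors), and the options control batch size, failure behaviour, and parallelism. The helper below is a hypothetical caller of the package-level function defined above.

// Sketch: delete an arbitrary key set in slot-safe batches.
func deleteKeysSharded(ctx context.Context, rdb redis.UniversalClient, keys []string) error {
    return ProcessKeysBySlot(ctx, rdb, keys,
        func(ctx context.Context, slot int64, batch []string) error {
            // All keys handed to one callback share a hash slot, so a single DEL is safe in cluster mode.
            return rdb.Del(ctx, batch...).Err()
        },
        WithBatchSize(100),        // override the default of 50 keys per batch
        WithContinueOnError(true), // log failed batches but keep processing the rest
        WithConcurrentLimit(5),    // at most 5 batches in flight at once
    )
}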
@ -12,55 +12,55 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/dtm-labs/rockscache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
|
||||
"github.com/openimsdk/tools/s3"
|
||||
"github.com/openimsdk/tools/s3/cont"
|
||||
"github.com/openimsdk/tools/s3/minio"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/dtm-labs/rockscache"
|
||||
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
|
||||
"github.com/openimsdk/tools/s3"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ObjectCache interface {
|
||||
metaCache
|
||||
GetName(ctx context.Context, engine string, name string) (*relationtb.ObjectModel, error)
|
||||
DelObjectName(engine string, names ...string) ObjectCache
|
||||
}
|
||||
|
||||
func NewObjectCacheRedis(rdb redis.UniversalClient, objDB relationtb.ObjectInfoModelInterface) ObjectCache {
|
||||
rcClient := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
|
||||
func NewObjectCacheRedis(rdb redis.UniversalClient, objDB database.ObjectInfo) cache.ObjectCache {
|
||||
opts := rockscache.NewDefaultOptions()
|
||||
batchHandler := NewBatchDeleterRedis(rdb, &opts, nil)
|
||||
return &objectCacheRedis{
|
||||
rcClient: rcClient,
|
||||
expireTime: time.Hour * 12,
|
||||
objDB: objDB,
|
||||
metaCache: NewMetaCacheRedis(rcClient),
|
||||
BatchDeleter: batchHandler,
|
||||
rcClient: rockscache.NewClient(rdb, opts),
|
||||
expireTime: time.Hour * 12,
|
||||
objDB: objDB,
|
||||
}
|
||||
}
|
||||
|
||||
type objectCacheRedis struct {
|
||||
metaCache
|
||||
objDB relationtb.ObjectInfoModelInterface
|
||||
cache.BatchDeleter
|
||||
objDB database.ObjectInfo
|
||||
rcClient *rockscache.Client
|
||||
expireTime time.Duration
|
||||
}
|
||||
|
||||
func (g *objectCacheRedis) NewCache() ObjectCache {
|
||||
func (g *objectCacheRedis) getObjectKey(engine string, name string) string {
|
||||
return cachekey.GetObjectKey(engine, name)
|
||||
}
|
||||
|
||||
func (g *objectCacheRedis) CloneObjectCache() cache.ObjectCache {
|
||||
return &objectCacheRedis{
|
||||
rcClient: g.rcClient,
|
||||
expireTime: g.expireTime,
|
||||
objDB: g.objDB,
|
||||
metaCache: NewMetaCacheRedis(g.rcClient, g.metaCache.GetPreDelKeys()...),
|
||||
BatchDeleter: g.BatchDeleter.Clone(),
|
||||
rcClient: g.rcClient,
|
||||
expireTime: g.expireTime,
|
||||
objDB: g.objDB,
|
||||
}
|
||||
}
|
||||
|
||||
func (g *objectCacheRedis) DelObjectName(engine string, names ...string) ObjectCache {
|
||||
objectCache := g.NewCache()
|
||||
func (g *objectCacheRedis) DelObjectName(engine string, names ...string) cache.ObjectCache {
|
||||
objectCache := g.CloneObjectCache()
|
||||
keys := make([]string, 0, len(names))
|
||||
for _, name := range names {
|
||||
keys = append(keys, g.getObjectKey(name, engine))
|
||||
@ -69,60 +69,40 @@ func (g *objectCacheRedis) DelObjectName(engine string, names ...string) ObjectC
|
||||
return objectCache
|
||||
}
|
||||
|
||||
func (g *objectCacheRedis) getObjectKey(engine string, name string) string {
|
||||
return "OBJECT:" + engine + ":" + name
|
||||
}
|
||||
|
||||
func (g *objectCacheRedis) GetName(ctx context.Context, engine string, name string) (*relationtb.ObjectModel, error) {
|
||||
return getCache(ctx, g.rcClient, g.getObjectKey(name, engine), g.expireTime, func(ctx context.Context) (*relationtb.ObjectModel, error) {
|
||||
func (g *objectCacheRedis) GetName(ctx context.Context, engine string, name string) (*model.Object, error) {
|
||||
return getCache(ctx, g.rcClient, g.getObjectKey(name, engine), g.expireTime, func(ctx context.Context) (*model.Object, error) {
|
||||
return g.objDB.Take(ctx, engine, name)
|
||||
})
|
||||
}
|
||||
|
||||
type S3Cache interface {
|
||||
metaCache
|
||||
GetKey(ctx context.Context, engine string, key string) (*s3.ObjectInfo, error)
|
||||
DelS3Key(engine string, keys ...string) S3Cache
|
||||
}
|
||||
|
||||
func NewS3Cache(rdb redis.UniversalClient, s3 s3.Interface) cont.S3Cache {
|
||||
rcClient := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
|
||||
opts := rockscache.NewDefaultOptions()
|
||||
batchHandler := NewBatchDeleterRedis(rdb, &opts, nil)
|
||||
return &s3CacheRedis{
|
||||
rcClient: rcClient,
|
||||
expireTime: time.Hour * 12,
|
||||
s3: s3,
|
||||
metaCache: NewMetaCacheRedis(rcClient),
|
||||
BatchDeleter: batchHandler,
|
||||
rcClient: rockscache.NewClient(rdb, opts),
|
||||
expireTime: time.Hour * 12,
|
||||
s3: s3,
|
||||
}
|
||||
}
|
||||
|
||||
type s3CacheRedis struct {
|
||||
metaCache
|
||||
cache.BatchDeleter
|
||||
s3 s3.Interface
|
||||
rcClient *rockscache.Client
|
||||
expireTime time.Duration
|
||||
}
|
||||
|
||||
func (g *s3CacheRedis) newCache() *s3CacheRedis {
|
||||
return &s3CacheRedis{
|
||||
rcClient: g.rcClient,
|
||||
expireTime: g.expireTime,
|
||||
s3: g.s3,
|
||||
metaCache: NewMetaCacheRedis(g.rcClient, g.metaCache.GetPreDelKeys()...),
|
||||
}
|
||||
func (g *s3CacheRedis) getS3Key(engine string, name string) string {
|
||||
return cachekey.GetS3Key(engine, name)
|
||||
}
|
||||
|
||||
func (g *s3CacheRedis) DelS3Key(ctx context.Context, engine string, keys ...string) error {
|
||||
s3cache := g.newCache()
|
||||
ks := make([]string, 0, len(keys))
|
||||
for _, key := range keys {
|
||||
ks = append(ks, g.getS3Key(engine, key))
|
||||
}
|
||||
s3cache.AddKeys(ks...)
|
||||
return s3cache.ExecDel(ctx)
|
||||
}
|
||||
|
||||
func (g *s3CacheRedis) getS3Key(engine string, name string) string {
|
||||
return "S3:" + engine + ":" + name
|
||||
return g.BatchDeleter.ExecDelWithKeys(ctx, ks)
|
||||
}
|
||||
|
||||
func (g *s3CacheRedis) GetKey(ctx context.Context, engine string, name string) (*s3.ObjectInfo, error) {
|
||||
@ -131,59 +111,41 @@ func (g *s3CacheRedis) GetKey(ctx context.Context, engine string, name string) (
|
||||
})
|
||||
}
|
||||
|
||||
type MinioCache interface {
|
||||
metaCache
|
||||
GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*MinioImageInfo, error)) (*MinioImageInfo, error)
|
||||
GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error)
|
||||
DelObjectImageInfoKey(keys ...string) MinioCache
|
||||
DelImageThumbnailKey(key string, format string, width int, height int) MinioCache
|
||||
}
|
||||
|
||||
func NewMinioCache(rdb redis.UniversalClient) minio.Cache {
|
||||
rcClient := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
|
||||
opts := rockscache.NewDefaultOptions()
|
||||
batchHandler := NewBatchDeleterRedis(rdb, &opts, nil)
|
||||
return &minioCacheRedis{
|
||||
rcClient: rcClient,
|
||||
expireTime: time.Hour * 24 * 7,
|
||||
metaCache: NewMetaCacheRedis(rcClient),
|
||||
BatchDeleter: batchHandler,
|
||||
rcClient: rockscache.NewClient(rdb, opts),
|
||||
expireTime: time.Hour * 24 * 7,
|
||||
}
|
||||
}
|
||||
|
||||
type minioCacheRedis struct {
|
||||
metaCache
|
||||
cache.BatchDeleter
|
||||
rcClient *rockscache.Client
|
||||
expireTime time.Duration
|
||||
}
|
||||
|
||||
func (g *minioCacheRedis) newCache() *minioCacheRedis {
|
||||
return &minioCacheRedis{
|
||||
rcClient: g.rcClient,
|
||||
expireTime: g.expireTime,
|
||||
metaCache: NewMetaCacheRedis(g.rcClient, g.metaCache.GetPreDelKeys()...),
|
||||
}
|
||||
func (g *minioCacheRedis) getObjectImageInfoKey(key string) string {
|
||||
return cachekey.GetObjectImageInfoKey(key)
|
||||
}
|
||||
|
||||
func (g *minioCacheRedis) getMinioImageThumbnailKey(key string, format string, width int, height int) string {
|
||||
return cachekey.GetMinioImageThumbnailKey(key, format, width, height)
|
||||
}
|
||||
|
||||
func (g *minioCacheRedis) DelObjectImageInfoKey(ctx context.Context, keys ...string) error {
|
||||
s3cache := g.newCache()
|
||||
ks := make([]string, 0, len(keys))
|
||||
for _, key := range keys {
|
||||
ks = append(ks, g.getObjectImageInfoKey(key))
|
||||
}
|
||||
s3cache.AddKeys(ks...)
|
||||
return s3cache.ExecDel(ctx)
|
||||
return g.BatchDeleter.ExecDelWithKeys(ctx, ks)
|
||||
}
|
||||
|
||||
func (g *minioCacheRedis) DelImageThumbnailKey(ctx context.Context, key string, format string, width int, height int) error {
|
||||
s3cache := g.newCache()
|
||||
s3cache.AddKeys(g.getMinioImageThumbnailKey(key, format, width, height))
|
||||
return s3cache.ExecDel(ctx)
|
||||
}
|
||||
return g.BatchDeleter.ExecDelWithKeys(ctx, []string{g.getMinioImageThumbnailKey(key, format, width, height)})
|
||||
|
||||
func (g *minioCacheRedis) getObjectImageInfoKey(key string) string {
|
||||
return "MINIO:IMAGE:" + key
|
||||
}
|
||||
|
||||
func (g *minioCacheRedis) getMinioImageThumbnailKey(key string, format string, width int, height int) string {
|
||||
return "MINIO:THUMBNAIL:" + format + ":w" + strconv.Itoa(width) + ":h" + strconv.Itoa(height) + ":" + key
|
||||
}
|
||||
|
||||
func (g *minioCacheRedis) GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*minio.ImageInfo, error)) (*minio.ImageInfo, error) {
|
||||
@ -197,11 +159,3 @@ func (g *minioCacheRedis) GetImageObjectKeyInfo(ctx context.Context, key string,
|
||||
func (g *minioCacheRedis) GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error) {
|
||||
return getCache(ctx, g.rcClient, g.getMinioImageThumbnailKey(key, format, width, height), g.expireTime, minioCache)
|
||||
}
|
||||
|
||||
type MinioImageInfo struct {
|
||||
IsImg bool `json:"isImg"`
|
||||
Width int `json:"width"`
|
||||
Height int `json:"height"`
|
||||
Format string `json:"format"`
|
||||
Etag string `json:"etag"`
|
||||
}
|
||||
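A short sketch of the new invalidation path (illustrative, not part of this diff): the refactor replaces the old metaCache AddKeys/ExecDel chain with a shared BatchDeleter, so ad-hoc deletions can hand the fully built key list straight to ExecDelWithKeys, as DelS3Key and DelImageThumbnailKey do above; chained deletion via Clone*/AddKeys/ChainExecDel remains available for the Del*Name-style methods.

// Sketch: invalidating a few object-cache entries directly through the BatchDeleter.
func invalidateObjects(ctx context.Context, oc cache.ObjectCache, engine string, names ...string) error {
    keys := make([]string, 0, len(names))
    for _, name := range names {
        keys = append(keys, cachekey.GetObjectKey(engine, name)) // same key builder used by objectCacheRedis
    }
    return oc.ExecDelWithKeys(ctx, keys)
}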
@ -1,38 +1,29 @@
|
||||
package cache
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/utils/stringutil"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
type SeqCache interface {
|
||||
SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error
|
||||
GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
|
||||
GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
|
||||
SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error
|
||||
SetMinSeqs(ctx context.Context, seqs map[string]int64) error
|
||||
GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
|
||||
GetMinSeq(ctx context.Context, conversationID string) (int64, error)
|
||||
GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
|
||||
GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (map[string]int64, error)
|
||||
SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error
|
||||
// seqs map: key userID value minSeq
|
||||
SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error)
|
||||
// seqs map: key conversationID value minSeq
|
||||
SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error
|
||||
// has read seq
|
||||
SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error
|
||||
// k: user, v: seq
|
||||
SetHasReadSeqs(ctx context.Context, conversationID string, hasReadSeqs map[string]int64) error
|
||||
// k: conversation, v :seq
|
||||
UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error
|
||||
GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
|
||||
GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error)
|
||||
}
|
||||
|
||||
func NewSeqCache(rdb redis.UniversalClient) SeqCache {
|
||||
func NewSeqCache(rdb redis.UniversalClient) cache.SeqCache {
|
||||
return &seqCache{rdb: rdb}
|
||||
}
|
||||
|
||||
@ -41,19 +32,19 @@ type seqCache struct {
|
||||
}
|
||||
|
||||
func (c *seqCache) getMaxSeqKey(conversationID string) string {
|
||||
return maxSeq + conversationID
|
||||
return cachekey.GetMaxSeqKey(conversationID)
|
||||
}
|
||||
|
||||
func (c *seqCache) getMinSeqKey(conversationID string) string {
|
||||
return minSeq + conversationID
|
||||
return cachekey.GetMinSeqKey(conversationID)
|
||||
}
|
||||
|
||||
func (c *seqCache) getHasReadSeqKey(conversationID string, userID string) string {
|
||||
return hasReadSeq + userID + ":" + conversationID
|
||||
return cachekey.GetHasReadSeqKey(conversationID, userID)
|
||||
}
|
||||
|
||||
func (c *seqCache) getConversationUserMinSeqKey(conversationID, userID string) string {
|
||||
return conversationUserMinSeq + conversationID + "u:" + userID
|
||||
return cachekey.GetConversationUserMinSeqKey(conversationID, userID)
|
||||
}
|
||||
|
||||
func (c *seqCache) setSeq(ctx context.Context, conversationID string, seq int64, getkey func(conversationID string) string) error {
|
||||
103  pkg/common/storage/cache/redis/third.go  vendored  Normal file
@ -0,0 +1,103 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"time"
|
||||
)
|
||||
|
||||
func NewThirdCache(rdb redis.UniversalClient) cache.ThirdCache {
|
||||
return &thirdCache{rdb: rdb}
|
||||
}
|
||||
|
||||
type thirdCache struct {
|
||||
rdb redis.UniversalClient
|
||||
}
|
||||
|
||||
func (c *thirdCache) getGetuiTokenKey() string {
|
||||
return cachekey.GetGetuiTokenKey()
|
||||
}
|
||||
|
||||
func (c *thirdCache) getGetuiTaskIDKey() string {
|
||||
return cachekey.GetGetuiTaskIDKey()
|
||||
}
|
||||
|
||||
func (c *thirdCache) getUserBadgeUnreadCountSumKey(userID string) string {
|
||||
return cachekey.GetUserBadgeUnreadCountSumKey(userID)
|
||||
}
|
||||
|
||||
func (c *thirdCache) getFcmAccountTokenKey(account string, platformID int) string {
|
||||
return cachekey.GetFcmAccountTokenKey(account, platformID)
|
||||
}
|
||||
|
||||
func (c *thirdCache) SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error) {
|
||||
return errs.Wrap(c.rdb.Set(ctx, c.getFcmAccountTokenKey(account, platformID), fcmToken, time.Duration(expireTime)*time.Second).Err())
|
||||
}
|
||||
|
||||
func (c *thirdCache) GetFcmToken(ctx context.Context, account string, platformID int) (string, error) {
|
||||
val, err := c.rdb.Get(ctx, c.getFcmAccountTokenKey(account, platformID)).Result()
|
||||
if err != nil {
|
||||
return "", errs.Wrap(err)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (c *thirdCache) DelFcmToken(ctx context.Context, account string, platformID int) error {
|
||||
return errs.Wrap(c.rdb.Del(ctx, c.getFcmAccountTokenKey(account, platformID)).Err())
|
||||
}
|
||||
|
||||
func (c *thirdCache) IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
|
||||
seq, err := c.rdb.Incr(ctx, c.getUserBadgeUnreadCountSumKey(userID)).Result()
|
||||
|
||||
return int(seq), errs.Wrap(err)
|
||||
}
|
||||
|
||||
func (c *thirdCache) SetUserBadgeUnreadCountSum(ctx context.Context, userID string, value int) error {
|
||||
return errs.Wrap(c.rdb.Set(ctx, c.getUserBadgeUnreadCountSumKey(userID), value, 0).Err())
|
||||
}
|
||||
|
||||
func (c *thirdCache) GetUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
|
||||
val, err := c.rdb.Get(ctx, c.getUserBadgeUnreadCountSumKey(userID)).Int()
|
||||
return val, errs.Wrap(err)
|
||||
}
|
||||
|
||||
func (c *thirdCache) SetGetuiToken(ctx context.Context, token string, expireTime int64) error {
|
||||
return errs.Wrap(c.rdb.Set(ctx, c.getGetuiTokenKey(), token, time.Duration(expireTime)*time.Second).Err())
|
||||
}
|
||||
|
||||
func (c *thirdCache) GetGetuiToken(ctx context.Context) (string, error) {
|
||||
val, err := c.rdb.Get(ctx, c.getGetuiTokenKey()).Result()
|
||||
if err != nil {
|
||||
return "", errs.Wrap(err)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (c *thirdCache) SetGetuiTaskID(ctx context.Context, taskID string, expireTime int64) error {
|
||||
return errs.Wrap(c.rdb.Set(ctx, c.getGetuiTaskIDKey(), taskID, time.Duration(expireTime)*time.Second).Err())
|
||||
}
|
||||
|
||||
func (c *thirdCache) GetGetuiTaskID(ctx context.Context) (string, error) {
|
||||
val, err := c.rdb.Get(ctx, c.getGetuiTaskIDKey()).Result()
|
||||
if err != nil {
|
||||
return "", errs.Wrap(err)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
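A usage sketch (illustrative only, not part of this diff): ThirdCache stores push-related tokens with an expiry expressed in seconds, so registering an FCM token for a device and reading it back looks roughly like this.

// Sketch: register an FCM token for 30 days, then read it back (tc is any cache.ThirdCache).
func registerFcmToken(ctx context.Context, tc cache.ThirdCache, account string, platformID int, token string) (string, error) {
    // SetFcmToken interprets expireTime as seconds.
    if err := tc.SetFcmToken(ctx, account, platformID, token, int64(30*24*time.Hour/time.Second)); err != nil {
        return "", err
    }
    return tc.GetFcmToken(ctx, account, platformID)
}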
@ -1,31 +1,38 @@
|
||||
package cache
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/cachekey"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/utils/stringutil"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
func NewTokenCacheModel(rdb redis.UniversalClient) TokenModel {
|
||||
type tokenCache struct {
|
||||
rdb redis.UniversalClient
|
||||
}
|
||||
|
||||
func NewTokenCacheModel(rdb redis.UniversalClient) cache.TokenModel {
|
||||
return &tokenCache{
|
||||
rdb: rdb,
|
||||
}
|
||||
}
|
||||
|
||||
type TokenModel interface {
|
||||
AddTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error
|
||||
GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error)
|
||||
SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error
|
||||
DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error
|
||||
}
|
||||
|
||||
type tokenCache struct {
|
||||
rdb redis.UniversalClient
|
||||
}
|
||||
|
||||
func (c *tokenCache) AddTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error {
|
||||
return errs.Wrap(c.rdb.HSet(ctx, cachekey.GetTokenKey(userID, platformID), token, flag).Err())
|
||||
}
|
||||
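A usage sketch (illustrative only, not part of this diff): the token cache keeps one Redis hash per user/platform, mapping each issued token to a status flag, so multi-device bookkeeping looks roughly like the helper below; the flag value 0 is only a placeholder for whatever constant the auth layer actually uses.

// Sketch: record a freshly issued token, then list every token the user holds on that platform.
func recordToken(ctx context.Context, tm cache.TokenModel, userID string, platformID int, token string) (map[string]int, error) {
    if err := tm.AddTokenFlag(ctx, userID, platformID, token, 0); err != nil {
        return nil, err
    }
    return tm.GetTokensWithoutError(ctx, userID, platformID)
}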
@ -12,78 +12,66 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"hash/crc32"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/dtm-labs/rockscache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/cachekey"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
|
||||
"github.com/openimsdk/protocol/constant"
|
||||
"github.com/openimsdk/protocol/user"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"hash/crc32"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
userExpireTime = time.Second * 60 * 60 * 12
|
||||
olineStatusKey = "ONLINE_STATUS:"
|
||||
userOlineStatusExpireTime = time.Second * 60 * 60 * 24
|
||||
statusMod = 501
|
||||
)
|
||||
|
||||
type UserCache interface {
|
||||
metaCache
|
||||
NewCache() UserCache
|
||||
GetUserInfo(ctx context.Context, userID string) (userInfo *relationtb.UserModel, err error)
|
||||
GetUsersInfo(ctx context.Context, userIDs []string) ([]*relationtb.UserModel, error)
|
||||
DelUsersInfo(userIDs ...string) UserCache
|
||||
GetUserGlobalRecvMsgOpt(ctx context.Context, userID string) (opt int, err error)
|
||||
DelUsersGlobalRecvMsgOpt(userIDs ...string) UserCache
|
||||
GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error)
|
||||
SetUserStatus(ctx context.Context, userID string, status, platformID int32) error
|
||||
}
|
||||
|
||||
type UserCacheRedis struct {
|
||||
metaCache
|
||||
rdb redis.UniversalClient
|
||||
// userDB relationtb.UserModelInterface
|
||||
userDB relationtb.UserModelInterface
|
||||
cache.BatchDeleter
|
||||
rdb redis.UniversalClient
|
||||
userDB database.User
|
||||
expireTime time.Duration
|
||||
rcClient *rockscache.Client
|
||||
}
|
||||
|
||||
func NewUserCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, userDB relationtb.UserModelInterface, options rockscache.Options) UserCache {
|
||||
rcClient := rockscache.NewClient(rdb, options)
|
||||
mc := NewMetaCacheRedis(rcClient)
|
||||
func NewUserCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, userDB database.User, options *rockscache.Options) cache.UserCache {
|
||||
batchHandler := NewBatchDeleterRedis(rdb, options, []string{localCache.User.Topic})
|
||||
u := localCache.User
|
||||
log.ZDebug(context.Background(), "user local cache init", "Topic", u.Topic, "SlotNum", u.SlotNum, "SlotSize", u.SlotSize, "enable", u.Enable())
|
||||
mc.SetTopic(u.Topic)
|
||||
mc.SetRawRedisClient(rdb)
|
||||
return &UserCacheRedis{
|
||||
rdb: rdb,
|
||||
metaCache: NewMetaCacheRedis(rcClient),
|
||||
userDB: userDB,
|
||||
expireTime: userExpireTime,
|
||||
rcClient: rcClient,
|
||||
BatchDeleter: batchHandler,
|
||||
rdb: rdb,
|
||||
userDB: userDB,
|
||||
expireTime: userExpireTime,
|
||||
rcClient: rockscache.NewClient(rdb, *options),
|
||||
}
|
||||
}
|
||||
|
||||
func (u *UserCacheRedis) NewCache() UserCache {
|
||||
func (u *UserCacheRedis) getOnlineStatusKey(modKey string) string {
|
||||
return cachekey.GetOnlineStatusKey(modKey)
|
||||
}
|
||||
|
||||
func (u *UserCacheRedis) CloneUserCache() cache.UserCache {
|
||||
return &UserCacheRedis{
|
||||
rdb: u.rdb,
|
||||
metaCache: u.Copy(),
|
||||
userDB: u.userDB,
|
||||
expireTime: u.expireTime,
|
||||
rcClient: u.rcClient,
|
||||
BatchDeleter: u.BatchDeleter.Clone(),
|
||||
rdb: u.rdb,
|
||||
userDB: u.userDB,
|
||||
expireTime: u.expireTime,
|
||||
rcClient: u.rcClient,
|
||||
}
|
||||
}
|
||||
|
||||
@ -95,26 +83,26 @@ func (u *UserCacheRedis) getUserGlobalRecvMsgOptKey(userID string) string {
|
||||
return cachekey.GetUserGlobalRecvMsgOptKey(userID)
|
||||
}
|
||||
|
||||
func (u *UserCacheRedis) GetUserInfo(ctx context.Context, userID string) (userInfo *relationtb.UserModel, err error) {
|
||||
return getCache(ctx, u.rcClient, u.getUserInfoKey(userID), u.expireTime, func(ctx context.Context) (*relationtb.UserModel, error) {
|
||||
func (u *UserCacheRedis) GetUserInfo(ctx context.Context, userID string) (userInfo *model.User, err error) {
|
||||
return getCache(ctx, u.rcClient, u.getUserInfoKey(userID), u.expireTime, func(ctx context.Context) (*model.User, error) {
|
||||
return u.userDB.Take(ctx, userID)
|
||||
})
|
||||
}
|
||||
|
||||
func (u *UserCacheRedis) GetUsersInfo(ctx context.Context, userIDs []string) ([]*relationtb.UserModel, error) {
|
||||
return batchGetCache2(ctx, u.rcClient, u.expireTime, userIDs, func(userID string) string {
|
||||
func (u *UserCacheRedis) GetUsersInfo(ctx context.Context, userIDs []string) ([]*model.User, error) {
|
||||
return batchGetCache(ctx, u.rcClient, u.expireTime, userIDs, func(userID string) string {
|
||||
return u.getUserInfoKey(userID)
|
||||
}, func(ctx context.Context, userID string) (*relationtb.UserModel, error) {
|
||||
}, func(ctx context.Context, userID string) (*model.User, error) {
|
||||
return u.userDB.Take(ctx, userID)
|
||||
})
|
||||
}
|
||||
|
||||
func (u *UserCacheRedis) DelUsersInfo(userIDs ...string) UserCache {
|
||||
func (u *UserCacheRedis) DelUsersInfo(userIDs ...string) cache.UserCache {
|
||||
keys := make([]string, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, u.getUserInfoKey(userID))
|
||||
}
|
||||
cache := u.NewCache()
|
||||
cache := u.CloneUserCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
@ -132,12 +120,12 @@ func (u *UserCacheRedis) GetUserGlobalRecvMsgOpt(ctx context.Context, userID str
|
||||
)
|
||||
}
|
||||
|
||||
func (u *UserCacheRedis) DelUsersGlobalRecvMsgOpt(userIDs ...string) UserCache {
|
||||
func (u *UserCacheRedis) DelUsersGlobalRecvMsgOpt(userIDs ...string) cache.UserCache {
|
||||
keys := make([]string, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, u.getUserGlobalRecvMsgOptKey(userID))
|
||||
}
|
||||
cache := u.NewCache()
|
||||
cache := u.CloneUserCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
@ -150,7 +138,7 @@ func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([
|
||||
UserIDNum := crc32.ChecksumIEEE([]byte(userID))
|
||||
modKey := strconv.Itoa(int(UserIDNum % statusMod))
|
||||
var onlineStatus user.OnlineStatus
|
||||
key := olineStatusKey + modKey
|
||||
key := u.getOnlineStatusKey(modKey)
|
||||
result, err := u.rdb.HGet(ctx, key, userID).Result()
|
||||
if err != nil {
|
||||
if errors.Is(err, redis.Nil) {
|
||||
@ -182,7 +170,7 @@ func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([
|
||||
func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, status, platformID int32) error {
|
||||
UserIDNum := crc32.ChecksumIEEE([]byte(userID))
|
||||
modKey := strconv.Itoa(int(UserIDNum % statusMod))
|
||||
key := olineStatusKey + modKey
|
||||
key := u.getOnlineStatusKey(modKey)
|
||||
log.ZDebug(ctx, "SetUserStatus args", "userID", userID, "status", status, "platformID", platformID, "modKey", modKey, "key", key)
|
||||
isNewKey, err := u.rdb.Exists(ctx, key).Result()
|
||||
if err != nil {
|
||||
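A small sketch of the status sharding used above (illustrative, not part of this diff): online status is spread across statusMod (501) Redis hashes, the bucket is chosen by crc32(userID) % statusMod, and the per-user field is then read or written with HGET/HSET, which is exactly what GetUserStatus and SetUserStatus do.

// Sketch: derive the status bucket key for one user the same way UserCacheRedis does.
func onlineStatusBucket(userID string) string {
    modKey := strconv.Itoa(int(crc32.ChecksumIEEE([]byte(userID)) % statusMod))
    return cachekey.GetOnlineStatusKey(modKey)
}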
51  pkg/common/storage/cache/s3.go  vendored  Normal file
@ -0,0 +1,51 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
|
||||
"github.com/openimsdk/tools/s3"
|
||||
)
|
||||
|
||||
type ObjectCache interface {
|
||||
BatchDeleter
|
||||
CloneObjectCache() ObjectCache
|
||||
GetName(ctx context.Context, engine string, name string) (*relationtb.Object, error)
|
||||
DelObjectName(engine string, names ...string) ObjectCache
|
||||
}
|
||||
|
||||
type S3Cache interface {
|
||||
BatchDeleter
|
||||
GetKey(ctx context.Context, engine string, key string) (*s3.ObjectInfo, error)
|
||||
DelS3Key(engine string, keys ...string) S3Cache
|
||||
}
|
||||
|
||||
// TODO integrating minio.Cache and MinioCache interfaces.
|
||||
type MinioCache interface {
|
||||
BatchDeleter
|
||||
GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*MinioImageInfo, error)) (*MinioImageInfo, error)
|
||||
GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error)
|
||||
DelObjectImageInfoKey(keys ...string) MinioCache
|
||||
DelImageThumbnailKey(key string, format string, width int, height int) MinioCache
|
||||
}
|
||||
|
||||
type MinioImageInfo struct {
|
||||
IsImg bool `json:"isImg"`
|
||||
Width int `json:"width"`
|
||||
Height int `json:"height"`
|
||||
Format string `json:"format"`
|
||||
Etag string `json:"etag"`
|
||||
}
|
||||
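A usage sketch (illustrative only, not part of this diff): the Get* methods on MinioCache are read-through, so the callback runs only on a cache miss and its result is stored under the derived key. The statObject helper below is a hypothetical stand-in for whatever probes the object store.

// Sketch: resolve image metadata through the cache, touching storage only on a miss.
func imageInfo(ctx context.Context, mc cache.MinioCache, key string) (*cache.MinioImageInfo, error) {
    return mc.GetImageObjectKeyInfo(ctx, key, func(ctx context.Context) (*cache.MinioImageInfo, error) {
        return statObject(ctx, key) // hypothetical loader invoked on cache miss
    })
}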
30  pkg/common/storage/cache/seq.go  vendored  Normal file
@ -0,0 +1,30 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
type SeqCache interface {
|
||||
SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error
|
||||
GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
|
||||
GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
|
||||
SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error
|
||||
SetMinSeqs(ctx context.Context, seqs map[string]int64) error
|
||||
GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
|
||||
GetMinSeq(ctx context.Context, conversationID string) (int64, error)
|
||||
GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
|
||||
GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (map[string]int64, error)
|
||||
SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error
|
||||
// seqs map: key userID value minSeq
|
||||
SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error)
|
||||
// seqs map: key conversationID value minSeq
|
||||
SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error
|
||||
// has read seq
|
||||
SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error
|
||||
// k: user, v: seq
|
||||
SetHasReadSeqs(ctx context.Context, conversationID string, hasReadSeqs map[string]int64) error
|
||||
// k: conversation, v :seq
|
||||
UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error
|
||||
GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
|
||||
GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error)
|
||||
}
|
||||
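A usage sketch (illustrative only, not part of this diff): SeqCache tracks per-conversation sequence watermarks, so marking messages as read and computing the remaining unread count can be expressed directly against this interface.

// Sketch: advance the user's has-read watermark, then compare it with the conversation's max seq.
func unreadCount(ctx context.Context, sc cache.SeqCache, userID, conversationID string, readUpTo int64) (int64, error) {
    if err := sc.SetHasReadSeq(ctx, userID, conversationID, readUpTo); err != nil {
        return 0, err
    }
    maxSeq, err := sc.GetMaxSeq(ctx, conversationID)
    if err != nil {
        return 0, err
    }
    return maxSeq - readUpTo, nil
}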
18  pkg/common/storage/cache/third.go  vendored  Normal file
@ -0,0 +1,18 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
type ThirdCache interface {
|
||||
SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error)
|
||||
GetFcmToken(ctx context.Context, account string, platformID int) (string, error)
|
||||
DelFcmToken(ctx context.Context, account string, platformID int) error
|
||||
IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error)
|
||||
SetUserBadgeUnreadCountSum(ctx context.Context, userID string, value int) error
|
||||
GetUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error)
|
||||
SetGetuiToken(ctx context.Context, token string, expireTime int64) error
|
||||
GetGetuiToken(ctx context.Context) (string, error)
|
||||
SetGetuiTaskID(ctx context.Context, taskID string, expireTime int64) error
|
||||
GetGetuiTaskID(ctx context.Context) (string, error)
|
||||
}
|
||||
12  pkg/common/storage/cache/token.go  vendored  Normal file
@ -0,0 +1,12 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
type TokenModel interface {
|
||||
AddTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error
|
||||
GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error)
|
||||
SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error
|
||||
DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error
|
||||
}
|
||||
33  pkg/common/storage/cache/user.go  vendored  Normal file
@ -0,0 +1,33 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
|
||||
"github.com/openimsdk/protocol/user"
|
||||
)
|
||||
|
||||
type UserCache interface {
|
||||
BatchDeleter
|
||||
CloneUserCache() UserCache
|
||||
GetUserInfo(ctx context.Context, userID string) (userInfo *model.User, err error)
|
||||
GetUsersInfo(ctx context.Context, userIDs []string) ([]*model.User, error)
|
||||
DelUsersInfo(userIDs ...string) UserCache
|
||||
GetUserGlobalRecvMsgOpt(ctx context.Context, userID string) (opt int, err error)
|
||||
DelUsersGlobalRecvMsgOpt(userIDs ...string) UserCache
|
||||
GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error)
|
||||
SetUserStatus(ctx context.Context, userID string, status, platformID int32) error
|
||||
}
|
||||
26  pkg/common/storage/common/types.go  Normal file
@ -0,0 +1,26 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package common
|
||||
|
||||
type BatchUpdateGroupMember struct {
|
||||
GroupID string
|
||||
UserID string
|
||||
Map map[string]any
|
||||
}
|
||||
|
||||
type GroupSimpleUserID struct {
|
||||
Hash uint64
|
||||
MemberNum uint32
|
||||
}
|
||||
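A brief sketch (illustrative only, not part of this diff): BatchUpdateGroupMember pairs a group/user with the column map to apply, so a bulk change is just a slice of these values; the "role_level" field name below is an assumption, not taken from this diff.

// Sketch: a batch that promotes two members in one update.
updates := []*common.BatchUpdateGroupMember{
    {GroupID: "g1", UserID: "u1", Map: map[string]any{"role_level": 60}},
    {GroupID: "g1", UserID: "u2", Map: map[string]any{"role_level": 60}},
}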
@ -19,7 +19,7 @@ import (
|
||||
|
||||
"github.com/golang-jwt/jwt/v4"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
|
||||
"github.com/openimsdk/protocol/constant"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/tokenverify"
|
||||
@ -16,9 +16,9 @@ package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
|
||||
"github.com/openimsdk/tools/db/pagination"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
@ -26,27 +26,27 @@ import (
|
||||
|
||||
type BlackDatabase interface {
|
||||
// Create add BlackList
|
||||
Create(ctx context.Context, blacks []*relation.BlackModel) (err error)
|
||||
Create(ctx context.Context, blacks []*model.Black) (err error)
|
||||
// Delete delete BlackList
|
||||
Delete(ctx context.Context, blacks []*relation.BlackModel) (err error)
|
||||
Delete(ctx context.Context, blacks []*model.Black) (err error)
|
||||
// FindOwnerBlacks get BlackList list
|
||||
FindOwnerBlacks(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (total int64, blacks []*relation.BlackModel, err error)
|
||||
FindBlackInfos(ctx context.Context, ownerUserID string, userIDs []string) (blacks []*relation.BlackModel, err error)
|
||||
FindOwnerBlacks(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (total int64, blacks []*model.Black, err error)
|
||||
FindBlackInfos(ctx context.Context, ownerUserID string, userIDs []string) (blacks []*model.Black, err error)
|
||||
// CheckIn Check whether user2 is in the black list of user1 (inUser1Blacks==true) Check whether user1 is in the black list of user2 (inUser2Blacks==true)
|
||||
CheckIn(ctx context.Context, userID1, userID2 string) (inUser1Blacks bool, inUser2Blacks bool, err error)
|
||||
}
|
||||
|
||||
type blackDatabase struct {
|
||||
black relation.BlackModelInterface
|
||||
black database.Black
|
||||
cache cache.BlackCache
|
||||
}
|
||||
|
||||
func NewBlackDatabase(black relation.BlackModelInterface, cache cache.BlackCache) BlackDatabase {
|
||||
func NewBlackDatabase(black database.Black, cache cache.BlackCache) BlackDatabase {
|
||||
return &blackDatabase{black, cache}
|
||||
}
|
||||
|
||||
// Create Add Blacklist.
|
||||
func (b *blackDatabase) Create(ctx context.Context, blacks []*relation.BlackModel) (err error) {
|
||||
func (b *blackDatabase) Create(ctx context.Context, blacks []*model.Black) (err error) {
|
||||
if err := b.black.Create(ctx, blacks); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -54,7 +54,7 @@ func (b *blackDatabase) Create(ctx context.Context, blacks []*relation.BlackMode
|
||||
}
|
||||
|
||||
// Delete Delete Blacklist.
|
||||
func (b *blackDatabase) Delete(ctx context.Context, blacks []*relation.BlackModel) (err error) {
|
||||
func (b *blackDatabase) Delete(ctx context.Context, blacks []*model.Black) (err error) {
|
||||
if err := b.black.Delete(ctx, blacks); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -62,16 +62,16 @@ func (b *blackDatabase) Delete(ctx context.Context, blacks []*relation.BlackMode
|
||||
}
|
||||
|
||||
// FindOwnerBlacks Get Blacklist List.
|
||||
func (b *blackDatabase) deleteBlackIDsCache(ctx context.Context, blacks []*relation.BlackModel) (err error) {
|
||||
cache := b.cache.NewCache()
|
||||
func (b *blackDatabase) deleteBlackIDsCache(ctx context.Context, blacks []*model.Black) (err error) {
|
||||
cache := b.cache.CloneBlackCache()
|
||||
for _, black := range blacks {
|
||||
cache = cache.DelBlackIDs(ctx, black.OwnerUserID)
|
||||
}
|
||||
return cache.ExecDel(ctx)
|
||||
return cache.ChainExecDel(ctx)
|
||||
}
|
||||
|
||||
// FindOwnerBlacks Get Blacklist List.
|
||||
func (b *blackDatabase) FindOwnerBlacks(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (total int64, blacks []*relation.BlackModel, err error) {
|
||||
func (b *blackDatabase) FindOwnerBlacks(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (total int64, blacks []*model.Black, err error) {
|
||||
return b.black.FindOwnerBlacks(ctx, ownerUserID, pagination)
|
||||
}
|
||||
|
||||
@ -95,6 +95,6 @@ func (b *blackDatabase) FindBlackIDs(ctx context.Context, ownerUserID string) (b
|
||||
}
|
||||
|
||||
// FindBlackInfos Get Blacklist List.
|
||||
func (b *blackDatabase) FindBlackInfos(ctx context.Context, ownerUserID string, userIDs []string) (blacks []*relation.BlackModel, err error) {
|
||||
func (b *blackDatabase) FindBlackInfos(ctx context.Context, ownerUserID string, userIDs []string) (blacks []*model.Black, err error) {
|
||||
return b.black.FindOwnerBlackInfos(ctx, ownerUserID, userIDs)
|
||||
}
|
||||
@ -16,10 +16,11 @@ package controller

import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/tools/db/pagination"
@ -33,18 +34,18 @@ type ConversationDatabase interface {
// UpdateUsersConversationField updates the properties of a conversation for specified users.
UpdateUsersConversationField(ctx context.Context, userIDs []string, conversationID string, args map[string]any) error
// CreateConversation creates a batch of new conversations.
CreateConversation(ctx context.Context, conversations []*relationtb.ConversationModel) error
CreateConversation(ctx context.Context, conversations []*relationtb.Conversation) error
// SyncPeerUserPrivateConversationTx ensures transactional operation while syncing private conversations between peers.
SyncPeerUserPrivateConversationTx(ctx context.Context, conversation []*relationtb.ConversationModel) error
SyncPeerUserPrivateConversationTx(ctx context.Context, conversation []*relationtb.Conversation) error
// FindConversations retrieves multiple conversations of a user by conversation IDs.
FindConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*relationtb.ConversationModel, error)
FindConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*relationtb.Conversation, error)
// GetUserAllConversation fetches all conversations of a user on the server.
GetUserAllConversation(ctx context.Context, ownerUserID string) ([]*relationtb.ConversationModel, error)
GetUserAllConversation(ctx context.Context, ownerUserID string) ([]*relationtb.Conversation, error)
// SetUserConversations sets multiple conversation properties for a user, creates new conversations if they do not exist, or updates them otherwise. This operation is atomic.
SetUserConversations(ctx context.Context, ownerUserID string, conversations []*relationtb.ConversationModel) error
SetUserConversations(ctx context.Context, ownerUserID string, conversations []*relationtb.Conversation) error
// SetUsersConversationFieldTx updates a specific field for multiple users' conversations, creating new conversations if they do not exist, or updates them otherwise. This operation is
// transactional.
SetUsersConversationFieldTx(ctx context.Context, userIDs []string, conversation *relationtb.ConversationModel, fieldMap map[string]any) error
SetUsersConversationFieldTx(ctx context.Context, userIDs []string, conversation *relationtb.Conversation, fieldMap map[string]any) error
// CreateGroupChatConversation creates a group chat conversation for the specified group ID and user IDs.
CreateGroupChatConversation(ctx context.Context, groupID string, userIDs []string) error
// GetConversationIDs retrieves conversation IDs for a given user.
@ -58,16 +59,16 @@ type ConversationDatabase interface {
// PageConversationIDs paginates through conversation IDs based on the specified pagination settings.
PageConversationIDs(ctx context.Context, pagination pagination.Pagination) (conversationIDs []string, err error)
// GetConversationsByConversationID retrieves conversations by their IDs.
GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*relationtb.ConversationModel, error)
GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*relationtb.Conversation, error)
// GetConversationIDsNeedDestruct fetches conversations that need to be destructed based on specific criteria.
GetConversationIDsNeedDestruct(ctx context.Context) ([]*relationtb.ConversationModel, error)
GetConversationIDsNeedDestruct(ctx context.Context) ([]*relationtb.Conversation, error)
// GetConversationNotReceiveMessageUserIDs gets user IDs for users in a conversation who have not received messages.
GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error)
// GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error)
// FindRecvMsgNotNotifyUserIDs(ctx context.Context, groupID string) ([]string, error)
}

func NewConversationDatabase(conversation relationtb.ConversationModelInterface, cache cache.ConversationCache, tx tx.Tx) ConversationDatabase {
func NewConversationDatabase(conversation database.Conversation, cache cache.ConversationCache, tx tx.Tx) ConversationDatabase {
return &conversationDatabase{
conversationDB: conversation,
cache: cache,
@ -76,14 +77,14 @@ func NewConversationDatabase(conversation relationtb.ConversationModelInterface,
}

type conversationDatabase struct {
conversationDB relationtb.ConversationModelInterface
conversationDB database.Conversation
cache cache.ConversationCache
tx tx.Tx
}

func (c *conversationDatabase) SetUsersConversationFieldTx(ctx context.Context, userIDs []string, conversation *relationtb.ConversationModel, fieldMap map[string]any) (err error) {
func (c *conversationDatabase) SetUsersConversationFieldTx(ctx context.Context, userIDs []string, conversation *relationtb.Conversation, fieldMap map[string]any) (err error) {
return c.tx.Transaction(ctx, func(ctx context.Context) error {
cache := c.cache.NewCache()
cache := c.cache.CloneConversationCache()
if conversation.GroupID != "" {
cache = cache.DelSuperGroupRecvMsgNotNotifyUserIDs(conversation.GroupID).DelSuperGroupRecvMsgNotNotifyUserIDsHash(conversation.GroupID)
}
@ -108,10 +109,10 @@ func (c *conversationDatabase) SetUsersConversationFieldTx(ctx context.Context,
}
NotUserIDs := stringutil.DifferenceString(haveUserIDs, userIDs)
log.ZDebug(ctx, "SetUsersConversationFieldTx", "NotUserIDs", NotUserIDs, "haveUserIDs", haveUserIDs, "userIDs", userIDs)
var conversations []*relationtb.ConversationModel
var conversations []*relationtb.Conversation
now := time.Now()
for _, v := range NotUserIDs {
temp := new(relationtb.ConversationModel)
temp := new(relationtb.Conversation)
if err = datautil.CopyStructFields(temp, conversation); err != nil {
return err
}
@ -126,7 +127,7 @@ func (c *conversationDatabase) SetUsersConversationFieldTx(ctx context.Context,
}
cache = cache.DelConversationIDs(NotUserIDs...).DelUserConversationIDsHash(NotUserIDs...).DelConversations(conversation.ConversationID, NotUserIDs...)
}
return cache.ExecDel(ctx)
return cache.ChainExecDel(ctx)
})
}

@ -135,31 +136,31 @@ func (c *conversationDatabase) UpdateUsersConversationField(ctx context.Context,
if err != nil {
return err
}
cache := c.cache.NewCache()
cache := c.cache.CloneConversationCache()
cache = cache.DelUsersConversation(conversationID, userIDs...)
if _, ok := args["recv_msg_opt"]; ok {
cache = cache.DelConversationNotReceiveMessageUserIDs(conversationID)
}
return cache.ExecDel(ctx)
return cache.ChainExecDel(ctx)
}

func (c *conversationDatabase) CreateConversation(ctx context.Context, conversations []*relationtb.ConversationModel) error {
func (c *conversationDatabase) CreateConversation(ctx context.Context, conversations []*relationtb.Conversation) error {
if err := c.conversationDB.Create(ctx, conversations); err != nil {
return err
}
var userIDs []string
cache := c.cache.NewCache()
cache := c.cache.CloneConversationCache()
for _, conversation := range conversations {
cache = cache.DelConversations(conversation.OwnerUserID, conversation.ConversationID)
cache = cache.DelConversationNotReceiveMessageUserIDs(conversation.ConversationID)
userIDs = append(userIDs, conversation.OwnerUserID)
}
return cache.DelConversationIDs(userIDs...).DelUserConversationIDsHash(userIDs...).ExecDel(ctx)
return cache.DelConversationIDs(userIDs...).DelUserConversationIDsHash(userIDs...).ChainExecDel(ctx)
}

func (c *conversationDatabase) SyncPeerUserPrivateConversationTx(ctx context.Context, conversations []*relationtb.ConversationModel) error {
func (c *conversationDatabase) SyncPeerUserPrivateConversationTx(ctx context.Context, conversations []*relationtb.Conversation) error {
return c.tx.Transaction(ctx, func(ctx context.Context) error {
cache := c.cache.NewCache()
cache := c.cache.CloneConversationCache()
for _, conversation := range conversations {
for _, v := range [][2]string{{conversation.OwnerUserID, conversation.UserID}, {conversation.UserID, conversation.OwnerUserID}} {
ownerUserID := v[0]
@ -180,33 +181,33 @@ func (c *conversationDatabase) SyncPeerUserPrivateConversationTx(ctx context.Con
newConversation.UserID = userID
newConversation.ConversationID = conversation.ConversationID
newConversation.IsPrivateChat = conversation.IsPrivateChat
if err := c.conversationDB.Create(ctx, []*relationtb.ConversationModel{&newConversation}); err != nil {
if err := c.conversationDB.Create(ctx, []*relationtb.Conversation{&newConversation}); err != nil {
return err
}
cache = cache.DelConversationIDs(ownerUserID).DelUserConversationIDsHash(ownerUserID)
}
}
}
return cache.ExecDel(ctx)
return cache.ChainExecDel(ctx)
})
}

func (c *conversationDatabase) FindConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*relationtb.ConversationModel, error) {
func (c *conversationDatabase) FindConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*relationtb.Conversation, error) {
return c.cache.GetConversations(ctx, ownerUserID, conversationIDs)
}

func (c *conversationDatabase) GetConversation(ctx context.Context, ownerUserID string, conversationID string) (*relationtb.ConversationModel, error) {
func (c *conversationDatabase) GetConversation(ctx context.Context, ownerUserID string, conversationID string) (*relationtb.Conversation, error) {
return c.cache.GetConversation(ctx, ownerUserID, conversationID)
}

func (c *conversationDatabase) GetUserAllConversation(ctx context.Context, ownerUserID string) ([]*relationtb.ConversationModel, error) {
func (c *conversationDatabase) GetUserAllConversation(ctx context.Context, ownerUserID string) ([]*relationtb.Conversation, error) {
return c.cache.GetUserAllConversations(ctx, ownerUserID)
}

func (c *conversationDatabase) SetUserConversations(ctx context.Context, ownerUserID string, conversations []*relationtb.ConversationModel) error {
func (c *conversationDatabase) SetUserConversations(ctx context.Context, ownerUserID string, conversations []*relationtb.Conversation) error {
return c.tx.Transaction(ctx, func(ctx context.Context) error {
cache := c.cache.NewCache()
groupIDs := datautil.Distinct(datautil.Filter(conversations, func(e *relationtb.ConversationModel) (string, bool) {
cache := c.cache.CloneConversationCache()
groupIDs := datautil.Distinct(datautil.Filter(conversations, func(e *relationtb.Conversation) (string, bool) {
return e.GroupID, e.GroupID != ""
}))
for _, groupID := range groupIDs {
@ -234,7 +235,7 @@ func (c *conversationDatabase) SetUserConversations(ctx context.Context, ownerUs
existConversationIDs = append(existConversationIDs, conversation.ConversationID)
}

var notExistConversations []*relationtb.ConversationModel
var notExistConversations []*relationtb.Conversation
for _, conversation := range conversations {
if !datautil.Contain(conversation.ConversationID, existConversationIDs...) {
notExistConversations = append(notExistConversations, conversation)
@ -247,9 +248,9 @@ func (c *conversationDatabase) SetUserConversations(ctx context.Context, ownerUs
}
cache = cache.DelConversationIDs(ownerUserID).
DelUserConversationIDsHash(ownerUserID).
DelConversationNotReceiveMessageUserIDs(datautil.Slice(notExistConversations, func(e *relationtb.ConversationModel) string { return e.ConversationID })...)
DelConversationNotReceiveMessageUserIDs(datautil.Slice(notExistConversations, func(e *relationtb.Conversation) string { return e.ConversationID })...)
}
return cache.ExecDel(ctx)
return cache.ChainExecDel(ctx)
})
}

@ -259,16 +260,16 @@ func (c *conversationDatabase) SetUserConversations(ctx context.Context, ownerUs

func (c *conversationDatabase) CreateGroupChatConversation(ctx context.Context, groupID string, userIDs []string) error {
return c.tx.Transaction(ctx, func(ctx context.Context) error {
cache := c.cache.NewCache()
cache := c.cache.CloneConversationCache()
conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID)
existConversationUserIDs, err := c.conversationDB.FindUserID(ctx, userIDs, []string{conversationID})
if err != nil {
return err
}
notExistUserIDs := stringutil.DifferenceString(userIDs, existConversationUserIDs)
var conversations []*relationtb.ConversationModel
var conversations []*relationtb.Conversation
for _, v := range notExistUserIDs {
conversation := relationtb.ConversationModel{ConversationType: constant.ReadGroupChatType, GroupID: groupID, OwnerUserID: v, ConversationID: conversationID}
conversation := relationtb.Conversation{ConversationType: constant.ReadGroupChatType, GroupID: groupID, OwnerUserID: v, ConversationID: conversationID}
conversations = append(conversations, &conversation)
cache = cache.DelConversations(v, conversationID).DelConversationNotReceiveMessageUserIDs(conversationID)
}
@ -286,7 +287,7 @@ func (c *conversationDatabase) CreateGroupChatConversation(ctx context.Context,
for _, v := range existConversationUserIDs {
cache = cache.DelConversations(v, conversationID)
}
return cache.ExecDel(ctx)
return cache.ChainExecDel(ctx)
})
}

@ -310,11 +311,11 @@ func (c *conversationDatabase) PageConversationIDs(ctx context.Context, paginati
return c.conversationDB.PageConversationIDs(ctx, pagination)
}

func (c *conversationDatabase) GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*relationtb.ConversationModel, error) {
func (c *conversationDatabase) GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*relationtb.Conversation, error) {
return c.conversationDB.GetConversationsByConversationID(ctx, conversationIDs)
}

func (c *conversationDatabase) GetConversationIDsNeedDestruct(ctx context.Context) ([]*relationtb.ConversationModel, error) {
func (c *conversationDatabase) GetConversationIDsNeedDestruct(ctx context.Context) ([]*relationtb.Conversation, error) {
return c.conversationDB.GetConversationIDsNeedDestruct(ctx)
}
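For the conversation controller, the same cache discipline combines with tx.Tx: write through database.Conversation, then invalidate every affected key in one ChainExecDel. A hedged sketch built only from calls shown above; the helper name, package name and the decision to wrap the write in a transaction are illustrative assumptions:

package example // illustrative sketch, not part of the commit

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/tools/db/tx"
)

// createAndInvalidate mirrors the CreateConversation flow: persist through
// database.Conversation, then clear the owners' cached conversation lists and
// hashes in one ChainExecDel so readers do not see stale data.
func createAndInvalidate(ctx context.Context, db database.Conversation, c cache.ConversationCache, t tx.Tx, conversations []*relationtb.Conversation) error {
	return t.Transaction(ctx, func(ctx context.Context) error {
		if err := db.Create(ctx, conversations); err != nil {
			return err
		}
		chain := c.CloneConversationCache()
		var userIDs []string
		for _, conversation := range conversations {
			chain = chain.DelConversations(conversation.OwnerUserID, conversation.ConversationID)
			userIDs = append(userIDs, conversation.OwnerUserID)
		}
		return chain.DelConversationIDs(userIDs...).DelUserConversationIDsHash(userIDs...).ChainExecDel(ctx)
	})
}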
@ -12,4 +12,4 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package controller // import "github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
package controller // import "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
@ -17,10 +17,12 @@ package controller
import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/tools/db/pagination"
"github.com/openimsdk/tools/db/tx"
@ -37,14 +39,14 @@ type FriendDatabase interface {
// AddFriendRequest adds or updates a friend request
AddFriendRequest(ctx context.Context, fromUserID, toUserID string, reqMsg string, ex string) (err error)

// BecomeFriends first checks if the users are already in the friends table; if not, it inserts them as friends
// BecomeFriends first checks if the users are already in the friends model; if not, it inserts them as friends
BecomeFriends(ctx context.Context, ownerUserID string, friendUserIDs []string, addSource int32) (err error)

// RefuseFriendRequest refuses a friend request
RefuseFriendRequest(ctx context.Context, friendRequest *relation.FriendRequestModel) (err error)
RefuseFriendRequest(ctx context.Context, friendRequest *model.FriendRequest) (err error)

// AgreeFriendRequest accepts a friend request
AgreeFriendRequest(ctx context.Context, friendRequest *relation.FriendRequestModel) (err error)
AgreeFriendRequest(ctx context.Context, friendRequest *model.FriendRequest) (err error)

// Delete removes a friend or friends from the owner's friend list
Delete(ctx context.Context, ownerUserID string, friendUserIDs []string) (err error)
@ -53,38 +55,38 @@ type FriendDatabase interface {
UpdateRemark(ctx context.Context, ownerUserID, friendUserID, remark string) (err error)

// PageOwnerFriends retrieves the friend list of ownerUserID with pagination
PageOwnerFriends(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (total int64, friends []*relation.FriendModel, err error)
PageOwnerFriends(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (total int64, friends []*model.Friend, err error)

// PageInWhoseFriends finds the users who have friendUserID in their friend list with pagination
PageInWhoseFriends(ctx context.Context, friendUserID string, pagination pagination.Pagination) (total int64, friends []*relation.FriendModel, err error)
PageInWhoseFriends(ctx context.Context, friendUserID string, pagination pagination.Pagination) (total int64, friends []*model.Friend, err error)

// PageFriendRequestFromMe retrieves the friend requests sent by the user with pagination
PageFriendRequestFromMe(ctx context.Context, userID string, pagination pagination.Pagination) (total int64, friends []*relation.FriendRequestModel, err error)
PageFriendRequestFromMe(ctx context.Context, userID string, pagination pagination.Pagination) (total int64, friends []*model.FriendRequest, err error)

// PageFriendRequestToMe retrieves the friend requests received by the user with pagination
PageFriendRequestToMe(ctx context.Context, userID string, pagination pagination.Pagination) (total int64, friends []*relation.FriendRequestModel, err error)
PageFriendRequestToMe(ctx context.Context, userID string, pagination pagination.Pagination) (total int64, friends []*model.FriendRequest, err error)

// FindFriendsWithError fetches specified friends of a user and returns an error if any do not exist
FindFriendsWithError(ctx context.Context, ownerUserID string, friendUserIDs []string) (friends []*relation.FriendModel, err error)
FindFriendsWithError(ctx context.Context, ownerUserID string, friendUserIDs []string) (friends []*model.Friend, err error)

// FindFriendUserIDs retrieves the friend IDs of a user
FindFriendUserIDs(ctx context.Context, ownerUserID string) (friendUserIDs []string, err error)

// FindBothFriendRequests finds friend requests sent and received
FindBothFriendRequests(ctx context.Context, fromUserID, toUserID string) (friends []*relation.FriendRequestModel, err error)
FindBothFriendRequests(ctx context.Context, fromUserID, toUserID string) (friends []*model.FriendRequest, err error)

// UpdateFriends updates fields for friends
UpdateFriends(ctx context.Context, ownerUserID string, friendUserIDs []string, val map[string]any) (err error)
}

type friendDatabase struct {
friend relation.FriendModelInterface
friendRequest relation.FriendRequestModelInterface
friend database.Friend
friendRequest database.FriendRequest
tx tx.Tx
cache cache.FriendCache
}

func NewFriendDatabase(friend relation.FriendModelInterface, friendRequest relation.FriendRequestModelInterface, cache cache.FriendCache, tx tx.Tx) FriendDatabase {
func NewFriendDatabase(friend database.Friend, friendRequest database.FriendRequest, cache cache.FriendCache, tx tx.Tx) FriendDatabase {
return &friendDatabase{friend: friend, friendRequest: friendRequest, cache: cache, tx: tx}
}

@ -124,10 +126,10 @@ func (f *friendDatabase) AddFriendRequest(ctx context.Context, fromUserID, toUse
m["ex"] = ex
m["create_time"] = time.Now()
return f.friendRequest.UpdateByMap(ctx, fromUserID, toUserID, m)
case relation.IsNotFound(err):
case mgo.IsNotFound(err):
return f.friendRequest.Create(
ctx,
[]*relation.FriendRequestModel{{FromUserID: fromUserID, ToUserID: toUserID, ReqMsg: reqMsg, Ex: ex, CreateTime: time.Now(), HandleTime: time.Unix(0, 0)}},
[]*model.FriendRequest{{FromUserID: fromUserID, ToUserID: toUserID, ReqMsg: reqMsg, Ex: ex, CreateTime: time.Now(), HandleTime: time.Unix(0, 0)}},
)
default:
return err
@ -138,7 +140,7 @@ func (f *friendDatabase) AddFriendRequest(ctx context.Context, fromUserID, toUse
// (1) First determine whether it is in the friends list (in or out does not return an error) (2) for not in the friends list can be inserted.
func (f *friendDatabase) BecomeFriends(ctx context.Context, ownerUserID string, friendUserIDs []string, addSource int32) (err error) {
return f.tx.Transaction(ctx, func(ctx context.Context) error {
cache := f.cache.NewCache()
cache := f.cache.CloneFriendCache()
// user find friends
fs1, err := f.friend.FindFriends(ctx, ownerUserID, friendUserIDs)
if err != nil {
@ -146,9 +148,9 @@ func (f *friendDatabase) BecomeFriends(ctx context.Context, ownerUserID string,
}
opUserID := mcontext.GetOperationID(ctx)
for _, v := range friendUserIDs {
fs1 = append(fs1, &relation.FriendModel{OwnerUserID: ownerUserID, FriendUserID: v, AddSource: addSource, OperatorUserID: opUserID})
fs1 = append(fs1, &model.Friend{OwnerUserID: ownerUserID, FriendUserID: v, AddSource: addSource, OperatorUserID: opUserID})
}
fs11 := datautil.DistinctAny(fs1, func(e *relation.FriendModel) string {
fs11 := datautil.DistinctAny(fs1, func(e *model.Friend) string {
return e.FriendUserID
})

@ -162,10 +164,10 @@ func (f *friendDatabase) BecomeFriends(ctx context.Context, ownerUserID string,
}
var newFriendIDs []string
for _, v := range friendUserIDs {
fs2 = append(fs2, &relation.FriendModel{OwnerUserID: v, FriendUserID: ownerUserID, AddSource: addSource, OperatorUserID: opUserID})
fs2 = append(fs2, &model.Friend{OwnerUserID: v, FriendUserID: ownerUserID, AddSource: addSource, OperatorUserID: opUserID})
newFriendIDs = append(newFriendIDs, v)
}
fs22 := datautil.DistinctAny(fs2, func(e *relation.FriendModel) string {
fs22 := datautil.DistinctAny(fs2, func(e *model.Friend) string {
return e.OwnerUserID
})
err = f.friend.Create(ctx, fs22)
@ -174,14 +176,14 @@ func (f *friendDatabase) BecomeFriends(ctx context.Context, ownerUserID string,
}
newFriendIDs = append(newFriendIDs, ownerUserID)
cache = cache.DelFriendIDs(newFriendIDs...)
return cache.ExecDel(ctx)
return cache.ChainExecDel(ctx)

})
}

// RefuseFriendRequest rejects a friend request. It first checks for an existing, unprocessed request.
// If no such request exists, it returns an error. Otherwise, it marks the request as refused.
func (f *friendDatabase) RefuseFriendRequest(ctx context.Context, friendRequest *relation.FriendRequestModel) error {
func (f *friendDatabase) RefuseFriendRequest(ctx context.Context, friendRequest *model.FriendRequest) error {
// Attempt to retrieve the friend request from the database.
fr, err := f.friendRequest.Take(ctx, friendRequest.FromUserID, friendRequest.ToUserID)
if err != nil {
@ -210,7 +212,7 @@ func (f *friendDatabase) RefuseFriendRequest(ctx context.Context, friendRequest
}

// AgreeFriendRequest accepts a friend request. It first checks for an existing, unprocessed request.
func (f *friendDatabase) AgreeFriendRequest(ctx context.Context, friendRequest *relation.FriendRequestModel) (err error) {
func (f *friendDatabase) AgreeFriendRequest(ctx context.Context, friendRequest *model.FriendRequest) (err error) {
return f.tx.Transaction(ctx, func(ctx context.Context) error {
now := time.Now()
fr, err := f.friendRequest.Take(ctx, friendRequest.FromUserID, friendRequest.ToUserID)
@ -237,7 +239,7 @@ func (f *friendDatabase) AgreeFriendRequest(ctx context.Context, friendRequest *
if err != nil {
return err
}
} else if err != nil && (!relation.IsNotFound(err)) {
} else if err != nil && (!mgo.IsNotFound(err)) {
return err
}

@ -245,14 +247,14 @@ func (f *friendDatabase) AgreeFriendRequest(ctx context.Context, friendRequest *
if err != nil {
return err
}
existsMap := datautil.SliceSet(datautil.Slice(exists, func(friend *relation.FriendModel) [2]string {
existsMap := datautil.SliceSet(datautil.Slice(exists, func(friend *model.Friend) [2]string {
return [...]string{friend.OwnerUserID, friend.FriendUserID} // My - Friend
}))
var adds []*relation.FriendModel
var adds []*model.Friend
if _, ok := existsMap[[...]string{friendRequest.ToUserID, friendRequest.FromUserID}]; !ok { // My - Friend
adds = append(
adds,
&relation.FriendModel{
&model.Friend{
OwnerUserID: friendRequest.ToUserID,
FriendUserID: friendRequest.FromUserID,
AddSource: int32(constant.BecomeFriendByApply),
@ -263,7 +265,7 @@ func (f *friendDatabase) AgreeFriendRequest(ctx context.Context, friendRequest *
if _, ok := existsMap[[...]string{friendRequest.FromUserID, friendRequest.ToUserID}]; !ok { // My - Friend
adds = append(
adds,
&relation.FriendModel{
&model.Friend{
OwnerUserID: friendRequest.FromUserID,
FriendUserID: friendRequest.ToUserID,
AddSource: int32(constant.BecomeFriendByApply),
@ -276,7 +278,7 @@ func (f *friendDatabase) AgreeFriendRequest(ctx context.Context, friendRequest *
return err
}
}
return f.cache.DelFriendIDs(friendRequest.ToUserID, friendRequest.FromUserID).ExecDel(ctx)
return f.cache.DelFriendIDs(friendRequest.ToUserID, friendRequest.FromUserID).ChainExecDel(ctx)
})
}

@ -285,7 +287,7 @@ func (f *friendDatabase) Delete(ctx context.Context, ownerUserID string, friendU
if err := f.friend.Delete(ctx, ownerUserID, friendUserIDs); err != nil {
return err
}
return f.cache.DelFriendIDs(append(friendUserIDs, ownerUserID)...).ExecDel(ctx)
return f.cache.DelFriendIDs(append(friendUserIDs, ownerUserID)...).ChainExecDel(ctx)
}

// UpdateRemark updates the remark for a friend. Zero value for remark is also supported.
@ -293,31 +295,31 @@ func (f *friendDatabase) UpdateRemark(ctx context.Context, ownerUserID, friendUs
if err := f.friend.UpdateRemark(ctx, ownerUserID, friendUserID, remark); err != nil {
return err
}
return f.cache.DelFriend(ownerUserID, friendUserID).ExecDel(ctx)
return f.cache.DelFriend(ownerUserID, friendUserID).ChainExecDel(ctx)
}

// PageOwnerFriends retrieves the list of friends for the ownerUserID. It does not return an error if the result is empty.
func (f *friendDatabase) PageOwnerFriends(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (total int64, friends []*relation.FriendModel, err error) {
func (f *friendDatabase) PageOwnerFriends(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (total int64, friends []*model.Friend, err error) {
return f.friend.FindOwnerFriends(ctx, ownerUserID, pagination)
}

// PageInWhoseFriends identifies in whose friend lists the friendUserID appears.
func (f *friendDatabase) PageInWhoseFriends(ctx context.Context, friendUserID string, pagination pagination.Pagination) (total int64, friends []*relation.FriendModel, err error) {
func (f *friendDatabase) PageInWhoseFriends(ctx context.Context, friendUserID string, pagination pagination.Pagination) (total int64, friends []*model.Friend, err error) {
return f.friend.FindInWhoseFriends(ctx, friendUserID, pagination)
}

// PageFriendRequestFromMe retrieves friend requests sent by me. It does not return an error if the result is empty.
func (f *friendDatabase) PageFriendRequestFromMe(ctx context.Context, userID string, pagination pagination.Pagination) (total int64, friends []*relation.FriendRequestModel, err error) {
func (f *friendDatabase) PageFriendRequestFromMe(ctx context.Context, userID string, pagination pagination.Pagination) (total int64, friends []*model.FriendRequest, err error) {
return f.friendRequest.FindFromUserID(ctx, userID, pagination)
}

// PageFriendRequestToMe retrieves friend requests received by me. It does not return an error if the result is empty.
func (f *friendDatabase) PageFriendRequestToMe(ctx context.Context, userID string, pagination pagination.Pagination) (total int64, friends []*relation.FriendRequestModel, err error) {
func (f *friendDatabase) PageFriendRequestToMe(ctx context.Context, userID string, pagination pagination.Pagination) (total int64, friends []*model.FriendRequest, err error) {
return f.friendRequest.FindToUserID(ctx, userID, pagination)
}

// FindFriendsWithError retrieves specified friends' information for ownerUserID. Returns an error if any friend does not exist.
func (f *friendDatabase) FindFriendsWithError(ctx context.Context, ownerUserID string, friendUserIDs []string) (friends []*relation.FriendModel, err error) {
func (f *friendDatabase) FindFriendsWithError(ctx context.Context, ownerUserID string, friendUserIDs []string) (friends []*model.Friend, err error) {
friends, err = f.friend.FindFriends(ctx, ownerUserID, friendUserIDs)
if err != nil {
return
@ -332,7 +334,7 @@ func (f *friendDatabase) FindFriendUserIDs(ctx context.Context, ownerUserID stri
return f.cache.GetFriendIDs(ctx, ownerUserID)
}

func (f *friendDatabase) FindBothFriendRequests(ctx context.Context, fromUserID, toUserID string) (friends []*relation.FriendRequestModel, err error) {
func (f *friendDatabase) FindBothFriendRequests(ctx context.Context, fromUserID, toUserID string) (friends []*model.FriendRequest, err error) {
return f.friendRequest.FindBothFriendRequests(ctx, fromUserID, toUserID)
}
func (f *friendDatabase) UpdateFriends(ctx context.Context, ownerUserID string, friendUserIDs []string, val map[string]any) (err error) {
@ -342,5 +344,5 @@ func (f *friendDatabase) UpdateFriends(ctx context.Context, ownerUserID string,
if err := f.friend.UpdateFriends(ctx, ownerUserID, friendUserIDs, val); err != nil {
return err
}
return f.cache.DelFriends(ownerUserID, friendUserIDs).ExecDel(ctx)
return f.cache.DelFriends(ownerUserID, friendUserIDs).ChainExecDel(ctx)
}
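The friend controller swaps relation.IsNotFound for mgo.IsNotFound and the db/table/relation structs for the storage/model types. A condensed, illustrative sketch of the upsert branch in AddFriendRequest under those assumptions; the helper name is hypothetical and the update map is trimmed to the two keys visible in the diff:

package example // illustrative sketch, not part of the commit

import (
	"context"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
)

// upsertFriendRequest condenses the AddFriendRequest branch: update an existing
// request in place, create it when mgo.IsNotFound reports a miss, and propagate
// any other error unchanged.
func upsertFriendRequest(ctx context.Context, db database.FriendRequest, fromUserID, toUserID, reqMsg, ex string) error {
	_, err := db.Take(ctx, fromUserID, toUserID)
	switch {
	case err == nil:
		return db.UpdateByMap(ctx, fromUserID, toUserID, map[string]any{
			"ex":          ex,
			"create_time": time.Now(),
		})
	case mgo.IsNotFound(err):
		return db.Create(ctx, []*model.FriendRequest{{
			FromUserID: fromUserID, ToUserID: toUserID, ReqMsg: reqMsg, Ex: ex,
			CreateTime: time.Now(), HandleTime: time.Unix(0, 0),
		}})
	default:
		return err
	}
}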
@ -17,11 +17,13 @@ package controller
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/common"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"time"

"github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/tools/db/pagination"
"github.com/openimsdk/tools/db/tx"
@ -31,32 +33,32 @@ import (

type GroupDatabase interface {
// CreateGroup creates new groups along with their members.
CreateGroup(ctx context.Context, groups []*relationtb.GroupModel, groupMembers []*relationtb.GroupMemberModel) error
CreateGroup(ctx context.Context, groups []*model.Group, groupMembers []*model.GroupMember) error
// TakeGroup retrieves a single group by its ID.
TakeGroup(ctx context.Context, groupID string) (group *relationtb.GroupModel, err error)
TakeGroup(ctx context.Context, groupID string) (group *model.Group, err error)
// FindGroup retrieves multiple groups by their IDs.
FindGroup(ctx context.Context, groupIDs []string) (groups []*relationtb.GroupModel, err error)
FindGroup(ctx context.Context, groupIDs []string) (groups []*model.Group, err error)
// SearchGroup searches for groups based on a keyword and pagination settings, returns total count and groups.
SearchGroup(ctx context.Context, keyword string, pagination pagination.Pagination) (int64, []*relationtb.GroupModel, error)
SearchGroup(ctx context.Context, keyword string, pagination pagination.Pagination) (int64, []*model.Group, error)
// UpdateGroup updates the properties of a group identified by its ID.
UpdateGroup(ctx context.Context, groupID string, data map[string]any) error
// DismissGroup disbands a group and optionally removes its members based on the deleteMember flag.
DismissGroup(ctx context.Context, groupID string, deleteMember bool) error

// TakeGroupMember retrieves a specific group member by group ID and user ID.
TakeGroupMember(ctx context.Context, groupID string, userID string) (groupMember *relationtb.GroupMemberModel, err error)
TakeGroupMember(ctx context.Context, groupID string, userID string) (groupMember *model.GroupMember, err error)
// TakeGroupOwner retrieves the owner of a group by group ID.
TakeGroupOwner(ctx context.Context, groupID string) (*relationtb.GroupMemberModel, error)
TakeGroupOwner(ctx context.Context, groupID string) (*model.GroupMember, error)
// FindGroupMembers retrieves members of a group filtered by user IDs.
FindGroupMembers(ctx context.Context, groupID string, userIDs []string) (groupMembers []*relationtb.GroupMemberModel, err error)
FindGroupMembers(ctx context.Context, groupID string, userIDs []string) (groupMembers []*model.GroupMember, err error)
// FindGroupMemberUser retrieves groups that a user is a member of, filtered by group IDs.
FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) (groupMembers []*relationtb.GroupMemberModel, err error)
FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) (groupMembers []*model.GroupMember, err error)
// FindGroupMemberRoleLevels retrieves group members filtered by their role levels within a group.
FindGroupMemberRoleLevels(ctx context.Context, groupID string, roleLevels []int32) (groupMembers []*relationtb.GroupMemberModel, err error)
FindGroupMemberRoleLevels(ctx context.Context, groupID string, roleLevels []int32) (groupMembers []*model.GroupMember, err error)
// FindGroupMemberAll retrieves all members of a group.
FindGroupMemberAll(ctx context.Context, groupID string) (groupMembers []*relationtb.GroupMemberModel, err error)
FindGroupMemberAll(ctx context.Context, groupID string) (groupMembers []*model.GroupMember, err error)
// FindGroupsOwner retrieves the owners for multiple groups.
FindGroupsOwner(ctx context.Context, groupIDs []string) ([]*relationtb.GroupMemberModel, error)
FindGroupsOwner(ctx context.Context, groupIDs []string) ([]*model.GroupMember, error)
// FindGroupMemberUserID retrieves the user IDs of all members in a group.
FindGroupMemberUserID(ctx context.Context, groupID string) ([]string, error)
// FindGroupMemberNum retrieves the number of members in a group.
@ -64,22 +66,22 @@ type GroupDatabase interface {
// FindUserManagedGroupID retrieves group IDs managed by a user.
FindUserManagedGroupID(ctx context.Context, userID string) (groupIDs []string, err error)
// PageGroupRequest paginates through group requests for specified groups.
PageGroupRequest(ctx context.Context, groupIDs []string, pagination pagination.Pagination) (int64, []*relationtb.GroupRequestModel, error)
PageGroupRequest(ctx context.Context, groupIDs []string, pagination pagination.Pagination) (int64, []*model.GroupRequest, error)
// GetGroupRoleLevelMemberIDs retrieves user IDs of group members with a specific role level.
GetGroupRoleLevelMemberIDs(ctx context.Context, groupID string, roleLevel int32) ([]string, error)

// PageGetJoinGroup paginates through groups that a user has joined.
PageGetJoinGroup(ctx context.Context, userID string, pagination pagination.Pagination) (total int64, totalGroupMembers []*relationtb.GroupMemberModel, err error)
PageGetJoinGroup(ctx context.Context, userID string, pagination pagination.Pagination) (total int64, totalGroupMembers []*model.GroupMember, err error)
// PageGetGroupMember paginates through members of a group.
PageGetGroupMember(ctx context.Context, groupID string, pagination pagination.Pagination) (total int64, totalGroupMembers []*relationtb.GroupMemberModel, err error)
PageGetGroupMember(ctx context.Context, groupID string, pagination pagination.Pagination) (total int64, totalGroupMembers []*model.GroupMember, err error)
// SearchGroupMember searches for group members based on a keyword, group ID, and pagination settings.
SearchGroupMember(ctx context.Context, keyword string, groupID string, pagination pagination.Pagination) (int64, []*relationtb.GroupMemberModel, error)
SearchGroupMember(ctx context.Context, keyword string, groupID string, pagination pagination.Pagination) (int64, []*model.GroupMember, error)
// HandlerGroupRequest processes a group join request with a specified result.
HandlerGroupRequest(ctx context.Context, groupID string, userID string, handledMsg string, handleResult int32, member *relationtb.GroupMemberModel) error
HandlerGroupRequest(ctx context.Context, groupID string, userID string, handledMsg string, handleResult int32, member *model.GroupMember) error
// DeleteGroupMember removes specified users from a group.
DeleteGroupMember(ctx context.Context, groupID string, userIDs []string) error
// MapGroupMemberUserID maps group IDs to their members' simplified user IDs.
MapGroupMemberUserID(ctx context.Context, groupIDs []string) (map[string]*relationtb.GroupSimpleUserID, error)
MapGroupMemberUserID(ctx context.Context, groupIDs []string) (map[string]*common.GroupSimpleUserID, error)
// MapGroupMemberNum maps group IDs to their member count.
MapGroupMemberNum(ctx context.Context, groupIDs []string) (map[string]uint32, error)
// TransferGroupOwner transfers the ownership of a group to another user.
@ -87,16 +89,16 @@ type GroupDatabase interface {
// UpdateGroupMember updates properties of a group member.
UpdateGroupMember(ctx context.Context, groupID string, userID string, data map[string]any) error
// UpdateGroupMembers batch updates properties of group members.
UpdateGroupMembers(ctx context.Context, data []*relationtb.BatchUpdateGroupMember) error
UpdateGroupMembers(ctx context.Context, data []*common.BatchUpdateGroupMember) error

// CreateGroupRequest creates new group join requests.
CreateGroupRequest(ctx context.Context, requests []*relationtb.GroupRequestModel) error
CreateGroupRequest(ctx context.Context, requests []*model.GroupRequest) error
// TakeGroupRequest retrieves a specific group join request.
TakeGroupRequest(ctx context.Context, groupID string, userID string) (*relationtb.GroupRequestModel, error)
TakeGroupRequest(ctx context.Context, groupID string, userID string) (*model.GroupRequest, error)
// FindGroupRequests retrieves multiple group join requests.
FindGroupRequests(ctx context.Context, groupID string, userIDs []string) ([]*relationtb.GroupRequestModel, error)
FindGroupRequests(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupRequest, error)
// PageGroupRequestUser paginates through group join requests made by a user.
PageGroupRequestUser(ctx context.Context, userID string, pagination pagination.Pagination) (int64, []*relationtb.GroupRequestModel, error)
PageGroupRequestUser(ctx context.Context, userID string, pagination pagination.Pagination) (int64, []*model.GroupRequest, error)

// CountTotal counts the total number of groups as of a certain date.
CountTotal(ctx context.Context, before *time.Time) (count int64, err error)
@ -109,49 +111,46 @@ type GroupDatabase interface {
func NewGroupDatabase(
rdb redis.UniversalClient,
localCache *config.LocalCache,
groupDB relationtb.GroupModelInterface,
groupMemberDB relationtb.GroupMemberModelInterface,
groupRequestDB relationtb.GroupRequestModelInterface,
groupDB database.Group,
groupMemberDB database.GroupMember,
groupRequestDB database.GroupRequest,
ctxTx tx.Tx,
groupHash cache.GroupHash,
) GroupDatabase {
rcOptions := rockscache.NewDefaultOptions()
rcOptions.StrongConsistency = true
rcOptions.RandomExpireAdjustment = 0.2
return &groupDatabase{
groupDB: groupDB,
groupMemberDB: groupMemberDB,
groupRequestDB: groupRequestDB,
ctxTx: ctxTx,
cache: cache.NewGroupCacheRedis(rdb, localCache, groupDB, groupMemberDB, groupRequestDB, groupHash, rcOptions),
cache: redis2.NewGroupCacheRedis(rdb, localCache, groupDB, groupMemberDB, groupRequestDB, groupHash, redis2.GetRocksCacheOptions()),
}
}

type groupDatabase struct {
groupDB relationtb.GroupModelInterface
groupMemberDB relationtb.GroupMemberModelInterface
groupRequestDB relationtb.GroupRequestModelInterface
groupDB database.Group
groupMemberDB database.GroupMember
groupRequestDB database.GroupRequest
ctxTx tx.Tx
cache cache.GroupCache
}

func (g *groupDatabase) FindGroupMembers(ctx context.Context, groupID string, userIDs []string) ([]*relationtb.GroupMemberModel, error) {
func (g *groupDatabase) FindGroupMembers(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupMember, error) {
return g.cache.GetGroupMembersInfo(ctx, groupID, userIDs)
}

func (g *groupDatabase) FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) ([]*relationtb.GroupMemberModel, error) {
func (g *groupDatabase) FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) ([]*model.GroupMember, error) {
return g.cache.FindGroupMemberUser(ctx, groupIDs, userID)
}

func (g *groupDatabase) FindGroupMemberRoleLevels(ctx context.Context, groupID string, roleLevels []int32) ([]*relationtb.GroupMemberModel, error) {
func (g *groupDatabase) FindGroupMemberRoleLevels(ctx context.Context, groupID string, roleLevels []int32) ([]*model.GroupMember, error) {
return g.cache.GetGroupRolesLevelMemberInfo(ctx, groupID, roleLevels)
}

func (g *groupDatabase) FindGroupMemberAll(ctx context.Context, groupID string) ([]*relationtb.GroupMemberModel, error) {
func (g *groupDatabase) FindGroupMemberAll(ctx context.Context, groupID string) ([]*model.GroupMember, error) {
return g.cache.GetAllGroupMembersInfo(ctx, groupID)
}

func (g *groupDatabase) FindGroupsOwner(ctx context.Context, groupIDs []string) ([]*relationtb.GroupMemberModel, error) {
func (g *groupDatabase) FindGroupsOwner(ctx context.Context, groupIDs []string) ([]*model.GroupMember, error) {
return g.cache.GetGroupsOwner(ctx, groupIDs)
}

@ -159,12 +158,12 @@ func (g *groupDatabase) GetGroupRoleLevelMemberIDs(ctx context.Context, groupID
return g.cache.GetGroupRoleLevelMemberIDs(ctx, groupID, roleLevel)
}

func (g *groupDatabase) CreateGroup(ctx context.Context, groups []*relationtb.GroupModel, groupMembers []*relationtb.GroupMemberModel) error {
func (g *groupDatabase) CreateGroup(ctx context.Context, groups []*model.Group, groupMembers []*model.GroupMember) error {
if len(groups)+len(groupMembers) == 0 {
return nil
}
return g.ctxTx.Transaction(ctx, func(ctx context.Context) error {
c := g.cache.NewCache()
c := g.cache.CloneGroupCache()
if len(groups) > 0 {
if err := g.groupDB.Create(ctx, groups); err != nil {
return err
@ -191,7 +190,7 @@ func (g *groupDatabase) CreateGroup(ctx context.Context, groups []*relationtb.Gr
DelGroupAllRoleLevel(groupMember.GroupID)
}
}
return c.ExecDel(ctx, true)
return c.ChainExecDel(ctx)
})
}

@ -207,15 +206,15 @@ func (g *groupDatabase) FindGroupMemberNum(ctx context.Context, groupID string)
return uint32(num), nil
}

func (g *groupDatabase) TakeGroup(ctx context.Context, groupID string) (*relationtb.GroupModel, error) {
func (g *groupDatabase) TakeGroup(ctx context.Context, groupID string) (*model.Group, error) {
return g.cache.GetGroupInfo(ctx, groupID)
}

func (g *groupDatabase) FindGroup(ctx context.Context, groupIDs []string) ([]*relationtb.GroupModel, error) {
func (g *groupDatabase) FindGroup(ctx context.Context, groupIDs []string) ([]*model.Group, error) {
return g.cache.GetGroupsInfo(ctx, groupIDs)
}

func (g *groupDatabase) SearchGroup(ctx context.Context, keyword string, pagination pagination.Pagination) (int64, []*relationtb.GroupModel, error) {
func (g *groupDatabase) SearchGroup(ctx context.Context, keyword string, pagination pagination.Pagination) (int64, []*model.Group, error) {
return g.groupDB.Search(ctx, keyword, pagination)
}

@ -223,12 +222,12 @@ func (g *groupDatabase) UpdateGroup(ctx context.Context, groupID string, data ma
if err := g.groupDB.UpdateMap(ctx, groupID, data); err != nil {
return err
}
return g.cache.DelGroupsInfo(groupID).ExecDel(ctx)
return g.cache.DelGroupsInfo(groupID).ChainExecDel(ctx)
}

func (g *groupDatabase) DismissGroup(ctx context.Context, groupID string, deleteMember bool) error {
return g.ctxTx.Transaction(ctx, func(ctx context.Context) error {
c := g.cache.NewCache()
c := g.cache.CloneGroupCache()
if err := g.groupDB.UpdateStatus(ctx, groupID, constant.GroupStatusDismissed); err != nil {
return err
}
@ -247,15 +246,15 @@ func (g *groupDatabase) DismissGroup(ctx context.Context, groupID string, delete
DelGroupAllRoleLevel(groupID).
DelGroupMembersInfo(groupID, userIDs...)
}
return c.DelGroupsInfo(groupID).ExecDel(ctx)
return c.DelGroupsInfo(groupID).ChainExecDel(ctx)
})
}

func (g *groupDatabase) TakeGroupMember(ctx context.Context, groupID string, userID string) (*relationtb.GroupMemberModel, error) {
func (g *groupDatabase) TakeGroupMember(ctx context.Context, groupID string, userID string) (*model.GroupMember, error) {
return g.cache.GetGroupMemberInfo(ctx, groupID, userID)
}

func (g *groupDatabase) TakeGroupOwner(ctx context.Context, groupID string) (*relationtb.GroupMemberModel, error) {
func (g *groupDatabase) TakeGroupOwner(ctx context.Context, groupID string) (*model.GroupMember, error) {
return g.cache.GetGroupOwner(ctx, groupID)
}

@ -263,11 +262,11 @@ func (g *groupDatabase) FindUserManagedGroupID(ctx context.Context, userID strin
return g.groupMemberDB.FindUserManagedGroupID(ctx, userID)
}

func (g *groupDatabase) PageGroupRequest(ctx context.Context, groupIDs []string, pagination pagination.Pagination) (int64, []*relationtb.GroupRequestModel, error) {
func (g *groupDatabase) PageGroupRequest(ctx context.Context, groupIDs []string, pagination pagination.Pagination) (int64, []*model.GroupRequest, error) {
return g.groupRequestDB.PageGroup(ctx, groupIDs, pagination)
}

func (g *groupDatabase) PageGetJoinGroup(ctx context.Context, userID string, pagination pagination.Pagination) (total int64, totalGroupMembers []*relationtb.GroupMemberModel, err error) {
func (g *groupDatabase) PageGetJoinGroup(ctx context.Context, userID string, pagination pagination.Pagination) (total int64, totalGroupMembers []*model.GroupMember, err error) {
groupIDs, err := g.cache.GetJoinedGroupIDs(ctx, userID)
if err != nil {
return 0, nil, err
@ -282,7 +281,7 @@ func (g *groupDatabase) PageGetJoinGroup(ctx context.Context, userID string, pag
return int64(len(groupIDs)), totalGroupMembers, nil
}

func (g *groupDatabase) PageGetGroupMember(ctx context.Context, groupID string, pagination pagination.Pagination) (total int64, totalGroupMembers []*relationtb.GroupMemberModel, err error) {
func (g *groupDatabase) PageGetGroupMember(ctx context.Context, groupID string, pagination pagination.Pagination) (total int64, totalGroupMembers []*model.GroupMember, err error) {
groupMemberIDs, err := g.cache.GetGroupMemberIDs(ctx, groupID)
if err != nil {
return 0, nil, err
@ -298,26 +297,27 @@ func (g *groupDatabase) PageGetGroupMember(ctx context.Context, groupID string,
return int64(len(groupMemberIDs)), members, nil
}

func (g *groupDatabase) SearchGroupMember(ctx context.Context, keyword string, groupID string, pagination pagination.Pagination) (int64, []*relationtb.GroupMemberModel, error) {
func (g *groupDatabase) SearchGroupMember(ctx context.Context, keyword string, groupID string, pagination pagination.Pagination) (int64, []*model.GroupMember, error) {
return g.groupMemberDB.SearchMember(ctx, keyword, groupID, pagination)
}

func (g *groupDatabase) HandlerGroupRequest(ctx context.Context, groupID string, userID string, handledMsg string, handleResult int32, member *relationtb.GroupMemberModel) error {
func (g *groupDatabase) HandlerGroupRequest(ctx context.Context, groupID string, userID string, handledMsg string, handleResult int32, member *model.GroupMember) error {
return g.ctxTx.Transaction(ctx, func(ctx context.Context) error {
if err := g.groupRequestDB.UpdateHandler(ctx, groupID, userID, handledMsg, handleResult); err != nil {
return err
}
if member != nil {
if err := g.groupMemberDB.Create(ctx, []*relationtb.GroupMemberModel{member}); err != nil {
c := g.cache.CloneGroupCache()
if err := g.groupMemberDB.Create(ctx, []*model.GroupMember{member}); err != nil {
return err
}
c := g.cache.DelGroupMembersHash(groupID).
c = c.DelGroupMembersHash(groupID).
DelGroupMembersInfo(groupID, member.UserID).
DelGroupMemberIDs(groupID).
DelGroupsMemberNum(groupID).
DelJoinedGroupID(member.UserID).
DelGroupRoleLevel(groupID, []int32{member.RoleLevel})
if err := c.ExecDel(ctx); err != nil {
if err := c.ChainExecDel(ctx); err != nil {
return err
}
}
@ -329,16 +329,17 @@ func (g *groupDatabase) DeleteGroupMember(ctx context.Context, groupID string, u
if err := g.groupMemberDB.Delete(ctx, groupID, userIDs); err != nil {
return err
}
return g.cache.DelGroupMembersHash(groupID).
c := g.cache.CloneGroupCache()
return c.DelGroupMembersHash(groupID).
DelGroupMemberIDs(groupID).
DelGroupsMemberNum(groupID).
DelJoinedGroupID(userIDs...).
DelGroupMembersInfo(groupID, userIDs...).
DelGroupAllRoleLevel(groupID).
ExecDel(ctx)
ChainExecDel(ctx)
}

func (g *groupDatabase) MapGroupMemberUserID(ctx context.Context, groupIDs []string) (map[string]*relationtb.GroupSimpleUserID, error) {
func (g *groupDatabase) MapGroupMemberUserID(ctx context.Context, groupIDs []string) (map[string]*common.GroupSimpleUserID, error) {
return g.cache.GetGroupMemberHashMap(ctx, groupIDs)
}

@ -362,9 +363,10 @@ func (g *groupDatabase) TransferGroupOwner(ctx context.Context, groupID string,
if err := g.groupMemberDB.UpdateRoleLevel(ctx, groupID, newOwnerUserID, constant.GroupOwner); err != nil {
return err
}
return g.cache.DelGroupMembersInfo(groupID, oldOwnerUserID, newOwnerUserID).
c := g.cache.CloneGroupCache()
return c.DelGroupMembersInfo(groupID, oldOwnerUserID, newOwnerUserID).
DelGroupAllRoleLevel(groupID).
DelGroupMembersHash(groupID).ExecDel(ctx)
DelGroupMembersHash(groupID).ChainExecDel(ctx)
})
}

@ -372,16 +374,17 @@ func (g *groupDatabase) UpdateGroupMember(ctx context.Context, groupID string, u
if err := g.groupMemberDB.Update(ctx, groupID, userID, data); err != nil {
return err
}
c := g.cache.DelGroupMembersInfo(groupID, userID)
c := g.cache.CloneGroupCache()
c = c.DelGroupMembersInfo(groupID, userID)
if g.groupMemberDB.IsUpdateRoleLevel(data) {
c = c.DelGroupAllRoleLevel(groupID)
}
return c.ExecDel(ctx)
return c.ChainExecDel(ctx)
}

func (g *groupDatabase) UpdateGroupMembers(ctx context.Context, data []*relationtb.BatchUpdateGroupMember) error {
func (g *groupDatabase) UpdateGroupMembers(ctx context.Context, data []*common.BatchUpdateGroupMember) error {
return g.ctxTx.Transaction(ctx, func(ctx context.Context) error {
c := g.cache.NewCache()
c := g.cache.CloneGroupCache()
for _, item := range data {
if err := g.groupMemberDB.Update(ctx, item.GroupID, item.UserID, item.Map); err != nil {
return err
@ -391,11 +394,11 @@ func (g *groupDatabase) UpdateGroupMembers(ctx context.Context, data []*relation
}
c = c.DelGroupMembersInfo(item.GroupID, item.UserID).DelGroupMembersHash(item.GroupID)
}
return c.ExecDel(ctx, true)
return c.ChainExecDel(ctx)
})
}

func (g *groupDatabase) CreateGroupRequest(ctx context.Context, requests []*relationtb.GroupRequestModel) error {
func (g *groupDatabase) CreateGroupRequest(ctx context.Context, requests []*model.GroupRequest) error {
return g.ctxTx.Transaction(ctx, func(ctx context.Context) error {
for _, request := range requests {
if err := g.groupRequestDB.Delete(ctx, request.GroupID, request.UserID); err != nil {
@ -410,11 +413,11 @@ func (g *groupDatabase) TakeGroupRequest(
ctx context.Context,
groupID string,
userID string,
) (*relationtb.GroupRequestModel, error) {
) (*model.GroupRequest, error) {
return g.groupRequestDB.Take(ctx, groupID, userID)
}

func (g *groupDatabase) PageGroupRequestUser(ctx context.Context, userID string, pagination pagination.Pagination) (int64, []*relationtb.GroupRequestModel, error) {
func (g *groupDatabase) PageGroupRequestUser(ctx context.Context, userID string, pagination pagination.Pagination) (int64, []*model.GroupRequest, error) {
return g.groupRequestDB.Page(ctx, userID, pagination)
}

@ -426,7 +429,7 @@ func (g *groupDatabase) CountRangeEverydayTotal(ctx context.Context, start time.
return g.groupDB.CountRangeEverydayTotal(ctx, start, end)
}

func (g *groupDatabase) FindGroupRequests(ctx context.Context, groupID string, userIDs []string) ([]*relationtb.GroupRequestModel, error) {
func (g *groupDatabase) FindGroupRequests(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupRequest, error) {
return g.groupRequestDB.FindGroupRequests(ctx, groupID, userIDs)
}

@ -434,9 +437,9 @@ func (g *groupDatabase) DeleteGroupMemberHash(ctx context.Context, groupIDs []st
if len(groupIDs) == 0 {
return nil
}
c := g.cache.NewCache()
c := g.cache.CloneGroupCache()
for _, groupID := range groupIDs {
c = c.DelGroupMembersHash(groupID)
}
return c.ExecDel(ctx)
return c.ChainExecDel(ctx)
}
|
||||
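Editor's note: the group.go hunks above all follow the same refactoring — clone the group cache handle, queue key deletions through chained Del* calls, then flush them once with ChainExecDel, usually inside the surrounding transaction. The sketch below is a simplified, self-contained stand-in for that pattern; the in-memory store and the method set are assumptions for illustration, not the project's actual GroupCache implementation.

package main

import (
	"context"
	"fmt"
)

// groupCache mimics the chainable cache used above: each Del* call queues a
// key to invalidate and returns the handle, and ChainExecDel flushes the queue once.
type groupCache struct {
	store   map[string]string // stand-in for Redis
	pending []string          // keys queued for deletion
}

// CloneGroupCache copies the handle so queued deletions do not leak between callers.
func (c *groupCache) CloneGroupCache() *groupCache {
	return &groupCache{store: c.store, pending: append([]string(nil), c.pending...)}
}

func (c *groupCache) DelGroupMembersHash(groupID string) *groupCache {
	c.pending = append(c.pending, "group_members_hash:"+groupID)
	return c
}

func (c *groupCache) DelGroupMemberIDs(groupID string) *groupCache {
	c.pending = append(c.pending, "group_member_ids:"+groupID)
	return c
}

// ChainExecDel deletes every queued key in one pass.
func (c *groupCache) ChainExecDel(ctx context.Context) error {
	for _, key := range c.pending {
		delete(c.store, key)
	}
	c.pending = nil
	return nil
}

func main() {
	cache := &groupCache{store: map[string]string{
		"group_members_hash:g1": "hash",
		"group_member_ids:g1":   "ids",
	}}
	c := cache.CloneGroupCache()
	if err := c.DelGroupMembersHash("g1").DelGroupMemberIDs("g1").ChainExecDel(context.Background()); err != nil {
		panic(err)
	}
	fmt.Println(cache.store) // map[] — both keys were invalidated together
}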
@ -18,14 +18,15 @@ import (
    "context"
    "encoding/json"
    "errors"
    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
    "strings"
    "time"

    "github.com/openimsdk/open-im-server/v3/pkg/common/config"
    "github.com/openimsdk/open-im-server/v3/pkg/common/convert"
    "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
    "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
    "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
    "github.com/openimsdk/protocol/constant"
    pbmsg "github.com/openimsdk/protocol/msg"
    "github.com/openimsdk/protocol/sdkws"
@ -48,13 +49,11 @@ type CommonMsgDatabase interface {
    // BatchInsertChat2DB inserts a batch of messages into the database for a specific conversation.
    BatchInsertChat2DB(ctx context.Context, conversationID string, msgs []*sdkws.MsgData, currentMaxSeq int64) error
    // RevokeMsg revokes a message in a conversation.
    RevokeMsg(ctx context.Context, conversationID string, seq int64, revoke *relation.RevokeModel) error
    RevokeMsg(ctx context.Context, conversationID string, seq int64, revoke *model.RevokeModel) error
    // MarkSingleChatMsgsAsRead marks messages as read for a single chat by sequence numbers.
    MarkSingleChatMsgsAsRead(ctx context.Context, userID string, conversationID string, seqs []int64) error
    // DeleteMessagesFromCache deletes message caches from Redis by sequence numbers.
    DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error
    // DelUserDeleteMsgsList deletes user's message deletion list.
    DelUserDeleteMsgsList(ctx context.Context, conversationID string, seqs []int64)
    // BatchInsertChat2Cache increments the sequence number and then batch inserts messages into the cache.
    BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNewConversation bool, err error)
    // GetMsgBySeqsRange retrieves messages from MongoDB by a range of sequence numbers.
@ -97,20 +96,19 @@ type CommonMsgDatabase interface {

    // to mq
    MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error
    MsgToModifyMQ(ctx context.Context, key, conversarionID string, msgs []*sdkws.MsgData) error
    MsgToPushMQ(ctx context.Context, key, conversarionID string, msg2mq *sdkws.MsgData) (int32, int64, error)
    MsgToMongoMQ(ctx context.Context, key, conversarionID string, msgs []*sdkws.MsgData, lastSeq int64) error

    RangeUserSendCount(ctx context.Context, start time.Time, end time.Time, group bool, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, users []*relation.UserCount, dateCount map[string]int64, err error)
    RangeGroupSendCount(ctx context.Context, start time.Time, end time.Time, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, groups []*relation.GroupCount, dateCount map[string]int64, err error)
    RangeUserSendCount(ctx context.Context, start time.Time, end time.Time, group bool, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, users []*model.UserCount, dateCount map[string]int64, err error)
    RangeGroupSendCount(ctx context.Context, start time.Time, end time.Time, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, groups []*model.GroupCount, dateCount map[string]int64, err error)
    ConvertMsgsDocLen(ctx context.Context, conversationIDs []string)

    // clear msg
    GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*relation.MsgDocModel, error)
    DeleteDocMsgBefore(ctx context.Context, ts int64, doc *relation.MsgDocModel) ([]int, error)
    GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error)
    DeleteDocMsgBefore(ctx context.Context, ts int64, doc *model.MsgDocModel) ([]int, error)
}

func NewCommonMsgDatabase(msgDocModel relation.MsgDocModelInterface, msg cache.MsgCache, seq cache.SeqCache, kafkaConf *config.Kafka) (CommonMsgDatabase, error) {
func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seq cache.SeqCache, kafkaConf *config.Kafka) (CommonMsgDatabase, error) {
    conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
    if err != nil {
        return nil, err
@ -138,7 +136,7 @@ func NewCommonMsgDatabase(msgDocModel relation.MsgDocModelInterface, msg cache.M
}

//func InitCommonMsgDatabase(rdb redis.UniversalClient, database *mongo.Database, config *tools.CronTaskConfig) (CommonMsgDatabase, error) {
// msgDocModel, err := mgo.NewMsgMongo(database)
// msgDocModel, err := database.NewMsgMongo(database)
// if err != nil {
// return nil, err
// }
@ -149,14 +147,13 @@ func NewCommonMsgDatabase(msgDocModel relation.MsgDocModelInterface, msg cache.M
//}

type commonMsgDatabase struct {
    msgDocDatabase relation.MsgDocModelInterface
    msgTable relation.MsgDocModel
    msg cache.MsgCache
    seq cache.SeqCache
    producer *kafka.Producer
    producerToMongo *kafka.Producer
    producerToModify *kafka.Producer
    producerToPush *kafka.Producer
    msgDocDatabase database.Msg
    msgTable model.MsgDocModel
    msg cache.MsgCache
    seq cache.SeqCache
    producer *kafka.Producer
    producerToMongo *kafka.Producer
    producerToPush *kafka.Producer
}

func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error {
@ -164,14 +161,6 @@ func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sd
    return err
}

func (db *commonMsgDatabase) MsgToModifyMQ(ctx context.Context, key, conversationID string, messages []*sdkws.MsgData) error {
    if len(messages) > 0 {
        _, _, err := db.producerToModify.SendMessage(ctx, key, &pbmsg.MsgDataToModifyByMQ{ConversationID: conversationID, Messages: messages})
        return err
    }
    return nil
}

func (db *commonMsgDatabase) MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) {
    partition, offset, err := db.producerToPush.SendMessage(ctx, key, &pbmsg.PushMsgDataToMQ{MsgData: msg2mq, ConversationID: conversationID})
    if err != nil {
@ -199,13 +188,13 @@ func (db *commonMsgDatabase) BatchInsertBlock(ctx context.Context, conversationI
        var ok bool
        switch key {
        case updateKeyMsg:
            var msg *relation.MsgDataModel
            msg, ok = field.(*relation.MsgDataModel)
            var msg *model.MsgDataModel
            msg, ok = field.(*model.MsgDataModel)
            if msg != nil && msg.Seq != firstSeq+int64(i) {
                return errs.ErrInternalServer.WrapMsg("seq is invalid")
            }
        case updateKeyRevoke:
            _, ok = field.(*relation.RevokeModel)
            _, ok = field.(*model.RevokeModel)
        default:
            return errs.ErrInternalServer.WrapMsg("key is invalid")
        }
@ -245,9 +234,9 @@ func (db *commonMsgDatabase) BatchInsertBlock(ctx context.Context, conversationI
                continue // The current data has been updated, skip the current data
            }
        }
        doc := relation.MsgDocModel{
        doc := model.MsgDocModel{
            DocID: db.msgTable.GetDocID(conversationID, seq),
            Msg: make([]*relation.MsgInfoModel, num),
            Msg: make([]*model.MsgInfoModel, num),
        }
        var insert int // Inserted data number
        for j := i; j < len(fields); j++ {
@ -258,21 +247,21 @@ func (db *commonMsgDatabase) BatchInsertBlock(ctx context.Context, conversationI
            insert++
            switch key {
            case updateKeyMsg:
                doc.Msg[db.msgTable.GetMsgIndex(seq)] = &relation.MsgInfoModel{
                    Msg: fields[j].(*relation.MsgDataModel),
                doc.Msg[db.msgTable.GetMsgIndex(seq)] = &model.MsgInfoModel{
                    Msg: fields[j].(*model.MsgDataModel),
                }
            case updateKeyRevoke:
                doc.Msg[db.msgTable.GetMsgIndex(seq)] = &relation.MsgInfoModel{
                    Revoke: fields[j].(*relation.RevokeModel),
                doc.Msg[db.msgTable.GetMsgIndex(seq)] = &model.MsgInfoModel{
                    Revoke: fields[j].(*model.RevokeModel),
                }
            }
        }
        for i, model := range doc.Msg {
            if model == nil {
                model = &relation.MsgInfoModel{}
                doc.Msg[i] = model
        for i, msgInfo := range doc.Msg {
            if msgInfo == nil {
                msgInfo = &model.MsgInfoModel{}
                doc.Msg[i] = msgInfo
            }
            if model.DelList == nil {
            if msgInfo.DelList == nil {
                doc.Msg[i].DelList = []string{}
            }
        }
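Editor's note: BatchInsertBlock above writes messages into fixed-size MongoDB documents, placing each message with GetDocID and GetMsgIndex on the doc model. The sketch below shows one plausible layout for that sharding arithmetic; the block size of 100 and the exact formulas are assumptions for illustration — the real constant and functions live on model.MsgDocModel and may differ.

package main

import "fmt"

const singleDocMsgNum = 100 // assumed block size; the real value is defined on the doc model

// getDocID groups consecutive seqs of one conversation into one document.
func getDocID(conversationID string, seq int64) string {
	return fmt.Sprintf("%s:%d", conversationID, (seq-1)/singleDocMsgNum)
}

// getMsgIndex is the message's slot inside that document's Msg slice.
func getMsgIndex(seq int64) int64 {
	return (seq - 1) % singleDocMsgNum
}

func main() {
	for _, seq := range []int64{1, 100, 101, 250} {
		fmt.Printf("seq=%d doc=%s index=%d\n", seq, getDocID("si_a_b", seq), getMsgIndex(seq))
	}
}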
@ -299,9 +288,9 @@ func (db *commonMsgDatabase) BatchInsertChat2DB(ctx context.Context, conversatio
        if msg == nil {
            continue
        }
        var offlinePushModel *relation.OfflinePushModel
        var offlinePushModel *model.OfflinePushModel
        if msg.OfflinePushInfo != nil {
            offlinePushModel = &relation.OfflinePushModel{
            offlinePushModel = &model.OfflinePushModel{
                Title: msg.OfflinePushInfo.Title,
                Desc: msg.OfflinePushInfo.Desc,
                Ex: msg.OfflinePushInfo.Ex,
@ -309,7 +298,7 @@ func (db *commonMsgDatabase) BatchInsertChat2DB(ctx context.Context, conversatio
                IOSBadgeCount: msg.OfflinePushInfo.IOSBadgeCount,
            }
        }
        msgs[i] = &relation.MsgDataModel{
        msgs[i] = &model.MsgDataModel{
            SendID: msg.SendID,
            RecvID: msg.RecvID,
            GroupID: msg.GroupID,
@ -336,7 +325,7 @@ func (db *commonMsgDatabase) BatchInsertChat2DB(ctx context.Context, conversatio
    return db.BatchInsertBlock(ctx, conversationID, msgs, updateKeyMsg, msgList[0].Seq)
}

func (db *commonMsgDatabase) RevokeMsg(ctx context.Context, conversationID string, seq int64, revoke *relation.RevokeModel) error {
func (db *commonMsgDatabase) RevokeMsg(ctx context.Context, conversationID string, seq int64, revoke *model.RevokeModel) error {
    return db.BatchInsertBlock(ctx, conversationID, []any{revoke}, updateKeyRevoke, seq)
}

@ -356,17 +345,13 @@ func (db *commonMsgDatabase) MarkSingleChatMsgsAsRead(ctx context.Context, userI
}

func (db *commonMsgDatabase) DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error {
    return db.msg.DeleteMessages(ctx, conversationID, seqs)
}

func (db *commonMsgDatabase) DelUserDeleteMsgsList(ctx context.Context, conversationID string, seqs []int64) {
    db.msg.DelUserDeleteMsgsList(ctx, conversationID, seqs)
    return db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs)
}

func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) {
    currentMaxSeq, err := db.seq.GetMaxSeq(ctx, conversationID)
    if err != nil && errs.Unwrap(err) != redis.Nil {
        log.ZError(ctx, "db.seq.GetMaxSeq", err)
        log.ZError(ctx, "storage.seq.GetMaxSeq", err)
        return 0, false, err
    }
    lenList := len(msgs)
@ -387,7 +372,7 @@ func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversa
        userSeqMap[m.SendID] = m.Seq
    }

    failedNum, err := db.msg.SetMessageToCache(ctx, conversationID, msgs)
    failedNum, err := db.msg.SetMessagesToCache(ctx, conversationID, msgs)
    if err != nil {
        prommetrics.MsgInsertRedisFailedCounter.Add(float64(failedNum))
        log.ZError(ctx, "setMessageToCache error", err, "len", len(msgs), "conversationID", conversationID)
@ -397,7 +382,7 @@ func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversa

    err = db.seq.SetMaxSeq(ctx, conversationID, currentMaxSeq)
    if err != nil {
        log.ZError(ctx, "db.seq.SetMaxSeq error", err, "conversationID", conversationID)
        log.ZError(ctx, "storage.seq.SetMaxSeq error", err, "conversationID", conversationID)
        prommetrics.SeqSetFailedCounter.Inc()
    }

@ -423,7 +408,7 @@ func (db *commonMsgDatabase) getMsgBySeqs(ctx context.Context, userID, conversat
    return totalMsgs, nil
}

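Editor's note: BatchInsertChat2Cache above reads the conversation's current max seq, assigns consecutive seqs to the incoming batch, writes the messages to the cache, and only then advances the stored max seq. A self-contained sketch of that allocation step follows; the types and the in-memory seq store are stand-ins for the project's cache interfaces, not its actual code.

package main

import (
	"fmt"
	"sync"
)

type msg struct {
	SendID string
	Seq    int64
}

// seqStore stands in for the Redis-backed SeqCache.
type seqStore struct {
	mu  sync.Mutex
	max map[string]int64
}

func (s *seqStore) GetMaxSeq(conversationID string) int64 {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.max[conversationID]
}

func (s *seqStore) SetMaxSeq(conversationID string, seq int64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.max[conversationID] = seq
}

// batchInsert assigns consecutive seqs starting after the current max,
// reports whether this is a brand-new conversation, and advances the max.
func batchInsert(s *seqStore, conversationID string, msgs []*msg) (lastSeq int64, isNew bool) {
	currentMaxSeq := s.GetMaxSeq(conversationID)
	isNew = currentMaxSeq == 0
	for _, m := range msgs {
		currentMaxSeq++
		m.Seq = currentMaxSeq
	}
	// In the real code the batch is written to Redis here (SetMessagesToCache)
	// and failures are counted via prommetrics before the max seq is persisted.
	s.SetMaxSeq(conversationID, currentMaxSeq)
	return currentMaxSeq, isNew
}

func main() {
	s := &seqStore{max: map[string]int64{}}
	batch := []*msg{{SendID: "u1"}, {SendID: "u2"}}
	last, isNew := batchInsert(s, "sg_1", batch)
	fmt.Println(last, isNew, batch[0].Seq, batch[1].Seq) // 2 true 1 2
}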
func (db *commonMsgDatabase) handlerDBMsg(ctx context.Context, cache map[int64][]*relation.MsgInfoModel, userID, conversationID string, msg *relation.MsgInfoModel) {
func (db *commonMsgDatabase) handlerDBMsg(ctx context.Context, cache map[int64][]*model.MsgInfoModel, userID, conversationID string, msg *model.MsgInfoModel) {
    if msg.IsRead {
        msg.Msg.IsRead = true
    }
@ -445,7 +430,7 @@ func (db *commonMsgDatabase) handlerDBMsg(ctx context.Context, cache map[int64][
    if quoteMsg.QuoteMessage == nil || quoteMsg.QuoteMessage.ContentType == constant.MsgRevokeNotification {
        return
    }
    var msgs []*relation.MsgInfoModel
    var msgs []*model.MsgInfoModel
    if v, ok := cache[quoteMsg.QuoteMessage.Seq]; ok {
        msgs = v
    } else {
@ -479,12 +464,12 @@ func (db *commonMsgDatabase) handlerDBMsg(ctx context.Context, cache map[int64][
    }
}

func (db *commonMsgDatabase) findMsgInfoBySeq(ctx context.Context, userID, docID string, conversationID string, seqs []int64) (totalMsgs []*relation.MsgInfoModel, err error) {
func (db *commonMsgDatabase) findMsgInfoBySeq(ctx context.Context, userID, docID string, conversationID string, seqs []int64) (totalMsgs []*model.MsgInfoModel, err error) {
    msgs, err := db.msgDocDatabase.GetMsgBySeqIndexIn1Doc(ctx, userID, docID, seqs)
    if err != nil {
        return nil, err
    }
    tempCache := make(map[int64][]*relation.MsgInfoModel)
    tempCache := make(map[int64][]*model.MsgInfoModel)
    for _, msg := range msgs {
        db.handlerDBMsg(ctx, tempCache, userID, conversationID, msg)
    }
@ -583,6 +568,7 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
    }
    newBegin := seqs[0]
    newEnd := seqs[len(seqs)-1]
    var successMsgs []*sdkws.MsgData
    log.ZDebug(ctx, "GetMsgBySeqsRange", "first seqs", seqs, "newBegin", newBegin, "newEnd", newEnd)
    cachedMsgs, failedSeqs, err := db.msg.GetMessagesBySeq(ctx, conversationID, seqs)
    if err != nil {
@ -591,54 +577,12 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
            log.ZError(ctx, "get message from redis exception", err, "conversationID", conversationID, "seqs", seqs)
        }
    }
    var successMsgs []*sdkws.MsgData
    if len(cachedMsgs) > 0 {
        delSeqs, err := db.msg.GetUserDelList(ctx, userID, conversationID)
        if err != nil && errs.Unwrap(err) != redis.Nil {
            return 0, 0, nil, err
        }
        var cacheDelNum int
        for _, msg := range cachedMsgs {
            if !datautil.Contain(msg.Seq, delSeqs...) {
                successMsgs = append(successMsgs, msg)
            } else {
                cacheDelNum += 1
            }
        }
        log.ZDebug(ctx, "get delSeqs from redis", "delSeqs", delSeqs, "userID", userID, "conversationID", conversationID, "cacheDelNum", cacheDelNum)
        var reGetSeqsCache []int64
        for i := 1; i <= cacheDelNum; {
            newSeq := newBegin - int64(i)
            if newSeq >= begin {
                if !datautil.Contain(newSeq, delSeqs...) {
                    log.ZDebug(ctx, "seq del in cache, a new seq in range append", "new seq", newSeq)
                    reGetSeqsCache = append(reGetSeqsCache, newSeq)
                    i++
                }
            } else {
                break
            }
        }
        if len(reGetSeqsCache) > 0 {
            log.ZDebug(ctx, "reGetSeqsCache", "reGetSeqsCache", reGetSeqsCache)
            cachedMsgs, failedSeqs2, err := db.msg.GetMessagesBySeq(ctx, conversationID, reGetSeqsCache)
            if err != nil {
                if err != redis.Nil {

                    log.ZError(ctx, "get message from redis exception", err, "conversationID", conversationID, "seqs", reGetSeqsCache)
                }
            }
            failedSeqs = append(failedSeqs, failedSeqs2...)
            successMsgs = append(successMsgs, cachedMsgs...)
        }
    }
    log.ZDebug(ctx, "get msgs from cache", "successMsgs", successMsgs)
    if len(failedSeqs) != 0 {
        log.ZDebug(ctx, "msgs not exist in redis", "seqs", failedSeqs)
    }
    successMsgs = append(successMsgs, cachedMsgs...)
    log.ZDebug(ctx, "get msgs from cache", "cachedMsgs", cachedMsgs)
    // get from cache or db

    if len(failedSeqs) > 0 {
        log.ZDebug(ctx, "msgs not exist in redis", "seqs", failedSeqs)
        mongoMsgs, err := db.getMsgBySeqsRange(ctx, userID, conversationID, failedSeqs, begin, end)
        if err != nil {

@ -704,12 +648,6 @@ func (db *commonMsgDatabase) DeleteConversationMsgsAndSetMinSeq(ctx context.Cont
    if minSeq == 0 {
        return nil
    }
    if remainTime == 0 {
        err = db.msg.CleanUpOneConversationAllMsg(ctx, conversationID)
        if err != nil {
            log.ZWarn(ctx, "CleanUpOneUserAllMsg", err, "conversationID", conversationID)
        }
    }
    return db.seq.SetMinSeq(ctx, conversationID, minSeq)
}

@ -720,7 +658,7 @@ func (db *commonMsgDatabase) UserMsgsDestruct(ctx context.Context, userID string
        msgDocModel, err := db.msgDocDatabase.GetMsgDocModelByIndex(ctx, conversationID, index, 1)
        if err != nil || msgDocModel.DocID == "" {
            if err != nil {
                if err == relation.ErrMsgListNotExist {
                if err == model.ErrMsgListNotExist {
                    log.ZDebug(ctx, "not doc find", "conversationID", conversationID, "userID", userID, "index", index)
                } else {
                    log.ZError(ctx, "deleteMsgRecursion GetUserMsgListByIndex failed", err, "conversationID", conversationID, "index", index)
@ -787,7 +725,7 @@ func (db *commonMsgDatabase) deleteMsgRecursion(ctx context.Context, conversatio
    msgDocModel, err := db.msgDocDatabase.GetMsgDocModelByIndex(ctx, conversationID, index, 1)
    if err != nil || msgDocModel.DocID == "" {
        if err != nil {
            if err == relation.ErrMsgListNotExist {
            if err == model.ErrMsgListNotExist {
                log.ZDebug(ctx, "deleteMsgRecursion ErrMsgListNotExist", "conversationID", conversationID, "index:", index)
            } else {
                log.ZError(ctx, "deleteMsgRecursion GetUserMsgListByIndex failed", err, "conversationID", conversationID, "index", index)
@ -829,7 +767,7 @@ func (db *commonMsgDatabase) deleteMsgRecursion(ctx context.Context, conversatio
}

func (db *commonMsgDatabase) DeleteMsgsPhysicalBySeqs(ctx context.Context, conversationID string, allSeqs []int64) error {
    if err := db.msg.DeleteMessages(ctx, conversationID, allSeqs); err != nil {
    if err := db.msg.DeleteMessagesFromCache(ctx, conversationID, allSeqs); err != nil {
        return err
    }
    for docID, seqs := range db.msgTable.GetDocIDSeqsMap(conversationID, allSeqs) {
@ -845,21 +783,9 @@ func (db *commonMsgDatabase) DeleteMsgsPhysicalBySeqs(ctx context.Context, conve
}

func (db *commonMsgDatabase) DeleteUserMsgsBySeqs(ctx context.Context, userID string, conversationID string, seqs []int64) error {
    cachedMsgs, _, err := db.msg.GetMessagesBySeq(ctx, conversationID, seqs)
    if err != nil && errs.Unwrap(err) != redis.Nil {
        log.ZWarn(ctx, "DeleteUserMsgsBySeqs", err, "conversationID", conversationID, "seqs", seqs)
    if err := db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs); err != nil {
        return err
    }
    if len(cachedMsgs) > 0 {
        var cacheSeqs []int64
        for _, msg := range cachedMsgs {
            cacheSeqs = append(cacheSeqs, msg.Seq)
        }
        if err := db.msg.UserDeleteMsgs(ctx, conversationID, cacheSeqs, userID); err != nil {
            return err
        }
    }

    for docID, seqs := range db.msgTable.GetDocIDSeqsMap(conversationID, seqs) {
        for _, seq := range seqs {
            if _, err := db.msgDocDatabase.PushUnique(ctx, docID, db.msgTable.GetMsgIndex(seq), "del_list", []string{userID}); err != nil {
@ -1005,7 +931,7 @@ func (db *commonMsgDatabase) RangeUserSendCount(
    ase bool,
    pageNumber int32,
    showNumber int32,
) (msgCount int64, userCount int64, users []*relation.UserCount, dateCount map[string]int64, err error) {
) (msgCount int64, userCount int64, users []*model.UserCount, dateCount map[string]int64, err error) {
    return db.msgDocDatabase.RangeUserSendCount(ctx, start, end, group, ase, pageNumber, showNumber)
}

@ -1016,7 +942,7 @@ func (db *commonMsgDatabase) RangeGroupSendCount(
    ase bool,
    pageNumber int32,
    showNumber int32,
) (msgCount int64, userCount int64, groups []*relation.GroupCount, dateCount map[string]int64, err error) {
) (msgCount int64, userCount int64, groups []*model.GroupCount, dateCount map[string]int64, err error) {
    return db.msgDocDatabase.RangeGroupSendCount(ctx, start, end, ase, pageNumber, showNumber)
}

@ -1054,11 +980,11 @@ func (db *commonMsgDatabase) ConvertMsgsDocLen(ctx context.Context, conversation
    db.msgDocDatabase.ConvertMsgsDocLen(ctx, conversationIDs)
}

func (db *commonMsgDatabase) GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*relation.MsgDocModel, error) {
func (db *commonMsgDatabase) GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) {
    return db.msgDocDatabase.GetBeforeMsg(ctx, ts, limit)
}

func (db *commonMsgDatabase) DeleteDocMsgBefore(ctx context.Context, ts int64, doc *relation.MsgDocModel) ([]int, error) {
func (db *commonMsgDatabase) DeleteDocMsgBefore(ctx context.Context, ts int64, doc *model.MsgDocModel) ([]int, error) {
    var notNull int
    index := make([]int, 0, len(doc.Msg))
    for i, message := range doc.Msg {
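Editor's note: the rewritten GetMsgBySeqsRange follows a cache-first read path — every requested seq is tried against Redis, the hits are collected into successMsgs, and only the seqs that missed (failedSeqs) are fetched from MongoDB. The sketch below reproduces that merge with plain maps standing in for the Redis and Mongo layers; it is an illustration of the flow, not the project's code.

package main

import (
	"fmt"
	"sort"
)

// fetchFromCache tries the cache for every seq and returns hits plus the seqs that missed.
func fetchFromCache(cache map[int64]string, seqs []int64) (hits map[int64]string, failedSeqs []int64) {
	hits = make(map[int64]string)
	for _, seq := range seqs {
		if v, ok := cache[seq]; ok {
			hits[seq] = v
		} else {
			failedSeqs = append(failedSeqs, seq)
		}
	}
	return hits, failedSeqs
}

func getMsgsBySeqs(cache, db map[int64]string, seqs []int64) []string {
	hits, failedSeqs := fetchFromCache(cache, seqs)
	// Only the cache misses hit the (slower) document store.
	for _, seq := range failedSeqs {
		if v, ok := db[seq]; ok {
			hits[seq] = v
		}
	}
	keys := make([]int64, 0, len(hits))
	for seq := range hits {
		keys = append(keys, seq)
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
	out := make([]string, 0, len(keys))
	for _, seq := range keys {
		out = append(out, hits[seq])
	}
	return out
}

func main() {
	cache := map[int64]string{101: "hi", 102: "there"}
	db := map[int64]string{100: "hello", 101: "hi", 102: "there", 103: "bye"}
	fmt.Println(getMsgsBySeqs(cache, db, []int64{100, 101, 102, 103}))
	// [hello hi there bye] — 100 and 103 came from the fallback store
}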
@ -17,7 +17,7 @@ package controller
import (
    "context"

    "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
)

type PushDatabase interface {
@ -16,11 +16,13 @@ package controller

import (
    "context"
    redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
    "path/filepath"
    "time"

    "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
    "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
    "github.com/openimsdk/tools/s3"
    "github.com/openimsdk/tools/s3/cont"
    "github.com/redis/go-redis/v9"
@ -33,15 +35,15 @@ type S3Database interface {
    InitiateMultipartUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int) (*cont.InitiateUploadResult, error)
    CompleteMultipartUpload(ctx context.Context, uploadID string, parts []string) (*cont.UploadResult, error)
    AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (time.Time, string, error)
    SetObject(ctx context.Context, info *relation.ObjectModel) error
    SetObject(ctx context.Context, info *model.Object) error
    StatObject(ctx context.Context, name string) (*s3.ObjectInfo, error)
    FormData(ctx context.Context, name string, size int64, contentType string, duration time.Duration) (*s3.FormData, error)
}

func NewS3Database(rdb redis.UniversalClient, s3 s3.Interface, obj relation.ObjectInfoModelInterface) S3Database {
func NewS3Database(rdb redis.UniversalClient, s3 s3.Interface, obj database.ObjectInfo) S3Database {
    return &s3Database{
        s3: cont.New(cache.NewS3Cache(rdb, s3), s3),
        cache: cache.NewObjectCacheRedis(rdb, obj),
        s3: cont.New(redis2.NewS3Cache(rdb, s3), s3),
        cache: redis2.NewObjectCacheRedis(rdb, obj),
        db: obj,
    }
}
@ -49,7 +51,7 @@ func NewS3Database(rdb redis.UniversalClient, s3 s3.Interface, obj relation.Obje
type s3Database struct {
    s3 *cont.Controller
    cache cache.ObjectCache
    db relation.ObjectInfoModelInterface
    db database.ObjectInfo
}

func (s *s3Database) PartSize(ctx context.Context, size int64) (int64, error) {
@ -72,12 +74,12 @@ func (s *s3Database) CompleteMultipartUpload(ctx context.Context, uploadID strin
    return s.s3.CompleteUpload(ctx, uploadID, parts)
}

func (s *s3Database) SetObject(ctx context.Context, info *relation.ObjectModel) error {
func (s *s3Database) SetObject(ctx context.Context, info *model.Object) error {
    info.Engine = s.s3.Engine()
    if err := s.db.SetObject(ctx, info); err != nil {
        return err
    }
    return s.cache.DelObjectName(info.Engine, info.Name).ExecDel(ctx)
    return s.cache.DelObjectName(info.Engine, info.Name).ChainExecDel(ctx)
}

func (s *s3Database) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (time.Time, string, error) {
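Editor's note: SetObject above persists the object record first and drops the cached copy only after the write succeeds, so a failed write leaves the old cache entry intact and readers repopulate the cache from the fresh record afterwards. A minimal sketch of that write-then-invalidate ordering with in-memory stand-ins for the object table and its cache (not the project's ObjectInfo or ObjectCache types):

package main

import (
	"errors"
	"fmt"
)

type object struct {
	Name   string
	Engine string
	Size   int64
}

type objectStore struct {
	db    map[string]object // stand-in for the MongoDB-backed object table
	cache map[string]object // stand-in for the Redis object cache
}

// setObject persists first, then invalidates; if the write fails the cache is
// left untouched and keeps serving the old, still-consistent record.
func (s *objectStore) setObject(info object) error {
	if info.Name == "" {
		return errors.New("missing object name")
	}
	s.db[info.Name] = info     // 1. durable write
	delete(s.cache, info.Name) // 2. drop the stale cache entry
	return nil
}

func (s *objectStore) getObject(name string) (object, bool) {
	if o, ok := s.cache[name]; ok {
		return o, true
	}
	o, ok := s.db[name]
	if ok {
		s.cache[name] = o // repopulate on read
	}
	return o, ok
}

func main() {
	s := &objectStore{db: map[string]object{}, cache: map[string]object{}}
	_ = s.setObject(object{Name: "avatar.png", Engine: "minio", Size: 1024})
	o, _ := s.getObject("avatar.png")
	fmt.Println(o.Engine, o.Size) // minio 1024
}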
@ -16,10 +16,11 @@ package controller

import (
    "context"
    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
    "time"

    "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
    "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
    "github.com/openimsdk/tools/db/pagination"
)

@ -27,15 +28,15 @@ type ThirdDatabase interface {
    FcmUpdateToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) error
    SetAppBadge(ctx context.Context, userID string, value int) error
    // about log for debug
    UploadLogs(ctx context.Context, logs []*relation.LogModel) error
    UploadLogs(ctx context.Context, logs []*model.Log) error
    DeleteLogs(ctx context.Context, logID []string, userID string) error
    SearchLogs(ctx context.Context, keyword string, start time.Time, end time.Time, pagination pagination.Pagination) (int64, []*relation.LogModel, error)
    GetLogs(ctx context.Context, LogIDs []string, userID string) ([]*relation.LogModel, error)
    SearchLogs(ctx context.Context, keyword string, start time.Time, end time.Time, pagination pagination.Pagination) (int64, []*model.Log, error)
    GetLogs(ctx context.Context, LogIDs []string, userID string) ([]*model.Log, error)
}

type thirdDatabase struct {
    cache cache.ThirdCache
    logdb relation.LogInterface
    logdb database.Log
}

// DeleteLogs implements ThirdDatabase.
@ -44,21 +45,21 @@ func (t *thirdDatabase) DeleteLogs(ctx context.Context, logID []string, userID s
}

// GetLogs implements ThirdDatabase.
func (t *thirdDatabase) GetLogs(ctx context.Context, LogIDs []string, userID string) ([]*relation.LogModel, error) {
func (t *thirdDatabase) GetLogs(ctx context.Context, LogIDs []string, userID string) ([]*model.Log, error) {
    return t.logdb.Get(ctx, LogIDs, userID)
}

// SearchLogs implements ThirdDatabase.
func (t *thirdDatabase) SearchLogs(ctx context.Context, keyword string, start time.Time, end time.Time, pagination pagination.Pagination) (int64, []*relation.LogModel, error) {
func (t *thirdDatabase) SearchLogs(ctx context.Context, keyword string, start time.Time, end time.Time, pagination pagination.Pagination) (int64, []*model.Log, error) {
    return t.logdb.Search(ctx, keyword, start, end, pagination)
}

// UploadLogs implements ThirdDatabase.
func (t *thirdDatabase) UploadLogs(ctx context.Context, logs []*relation.LogModel) error {
func (t *thirdDatabase) UploadLogs(ctx context.Context, logs []*model.Log) error {
    return t.logdb.Create(ctx, logs)
}

func NewThirdDatabase(cache cache.ThirdCache, logdb relation.LogInterface) ThirdDatabase {
func NewThirdDatabase(cache cache.ThirdCache, logdb database.Log) ThirdDatabase {
    return &thirdDatabase{cache: cache, logdb: logdb}
}

@ -16,6 +16,8 @@ package controller

import (
    "context"
    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
    "github.com/openimsdk/tools/db/pagination"
    "github.com/openimsdk/tools/db/tx"
    "github.com/openimsdk/tools/utils/datautil"
@ -24,37 +26,36 @@ import (
    "github.com/openimsdk/protocol/user"
    "github.com/openimsdk/tools/errs"

    "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
    "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
)

type UserDatabase interface {
    // FindWithError Get the information of the specified user. If the userID is not found, it will also return an error
    FindWithError(ctx context.Context, userIDs []string) (users []*relation.UserModel, err error)
    FindWithError(ctx context.Context, userIDs []string) (users []*model.User, err error)
    // Find Get the information of the specified user If the userID is not found, no error will be returned
    Find(ctx context.Context, userIDs []string) (users []*relation.UserModel, err error)
    Find(ctx context.Context, userIDs []string) (users []*model.User, err error)
    // Find userInfo By Nickname
    FindByNickname(ctx context.Context, nickname string) (users []*relation.UserModel, err error)
    FindByNickname(ctx context.Context, nickname string) (users []*model.User, err error)
    // Find notificationAccounts
    FindNotification(ctx context.Context, level int64) (users []*relation.UserModel, err error)
    // Create Insert multiple external guarantees that the userID is not repeated and does not exist in the db
    Create(ctx context.Context, users []*relation.UserModel) (err error)
    FindNotification(ctx context.Context, level int64) (users []*model.User, err error)
    // Create Insert multiple external guarantees that the userID is not repeated and does not exist in the storage
    Create(ctx context.Context, users []*model.User) (err error)
    // UpdateByMap update (zero value) external guarantee userID exists
    UpdateByMap(ctx context.Context, userID string, args map[string]any) (err error)
    // FindUser
    PageFindUser(ctx context.Context, level1 int64, level2 int64, pagination pagination.Pagination) (count int64, users []*relation.UserModel, err error)
    PageFindUser(ctx context.Context, level1 int64, level2 int64, pagination pagination.Pagination) (count int64, users []*model.User, err error)
    // FindUser with keyword
    PageFindUserWithKeyword(ctx context.Context, level1 int64, level2 int64, userID string, nickName string, pagination pagination.Pagination) (count int64, users []*relation.UserModel, err error)
    PageFindUserWithKeyword(ctx context.Context, level1 int64, level2 int64, userID string, nickName string, pagination pagination.Pagination) (count int64, users []*model.User, err error)
    // Page If not found, no error is returned
    Page(ctx context.Context, pagination pagination.Pagination) (count int64, users []*relation.UserModel, err error)
    Page(ctx context.Context, pagination pagination.Pagination) (count int64, users []*model.User, err error)
    // IsExist true as long as one exists
    IsExist(ctx context.Context, userIDs []string) (exist bool, err error)
    // GetAllUserID Get all user IDs
    GetAllUserID(ctx context.Context, pagination pagination.Pagination) (int64, []string, error)
    // Get user by userID
    GetUserByID(ctx context.Context, userID string) (user *relation.UserModel, err error)
    // InitOnce Inside the function, first query whether it exists in the db, if it exists, do nothing; if it does not exist, insert it
    InitOnce(ctx context.Context, users []*relation.UserModel) (err error)
    GetUserByID(ctx context.Context, userID string) (user *model.User, err error)
    // InitOnce Inside the function, first query whether it exists in the storage, if it exists, do nothing; if it does not exist, insert it
    InitOnce(ctx context.Context, users []*model.User) (err error)
    // CountTotal Get the total number of users
    CountTotal(ctx context.Context, before *time.Time) (int64, error)
    // CountRangeEverydayTotal Get the user increment in the range
@ -82,18 +83,18 @@ type UserDatabase interface {

type userDatabase struct {
    tx tx.Tx
    userDB relation.UserModelInterface
    userDB database.User
    cache cache.UserCache
    mongoDB relation.SubscribeUserModelInterface
    mongoDB database.SubscribeUser
}

func NewUserDatabase(userDB relation.UserModelInterface, cache cache.UserCache, tx tx.Tx, mongoDB relation.SubscribeUserModelInterface) UserDatabase {
func NewUserDatabase(userDB database.User, cache cache.UserCache, tx tx.Tx, mongoDB database.SubscribeUser) UserDatabase {
    return &userDatabase{userDB: userDB, cache: cache, tx: tx, mongoDB: mongoDB}
}

func (u *userDatabase) InitOnce(ctx context.Context, users []*relation.UserModel) error {
func (u *userDatabase) InitOnce(ctx context.Context, users []*model.User) error {
    // Extract user IDs from the given user models.
    userIDs := datautil.Slice(users, func(e *relation.UserModel) string {
    userIDs := datautil.Slice(users, func(e *model.User) string {
        return e.UserID
    })

@ -104,7 +105,7 @@ func (u *userDatabase) InitOnce(ctx context.Context, users []*relation.UserModel
    }

    // Determine which users are missing from the database.
    missingUsers := datautil.SliceAnySub(users, existingUsers, func(e *relation.UserModel) string {
    missingUsers := datautil.SliceAnySub(users, existingUsers, func(e *model.User) string {
        return e.UserID
    })

@ -119,7 +120,7 @@ func (u *userDatabase) InitOnce(ctx context.Context, users []*relation.UserModel
}

// FindWithError Get the information of the specified user and return an error if the userID is not found.
func (u *userDatabase) FindWithError(ctx context.Context, userIDs []string) (users []*relation.UserModel, err error) {
func (u *userDatabase) FindWithError(ctx context.Context, userIDs []string) (users []*model.User, err error) {
    users, err = u.cache.GetUsersInfo(ctx, userIDs)
    if err != nil {
        return
@ -131,27 +132,27 @@ func (u *userDatabase) FindWithError(ctx context.Context, userIDs []string) (use
}

// Find Get the information of the specified user. If the userID is not found, no error will be returned.
func (u *userDatabase) Find(ctx context.Context, userIDs []string) (users []*relation.UserModel, err error) {
func (u *userDatabase) Find(ctx context.Context, userIDs []string) (users []*model.User, err error) {
    return u.cache.GetUsersInfo(ctx, userIDs)
}

func (u *userDatabase) FindByNickname(ctx context.Context, nickname string) (users []*relation.UserModel, err error) {
func (u *userDatabase) FindByNickname(ctx context.Context, nickname string) (users []*model.User, err error) {
    return u.userDB.TakeByNickname(ctx, nickname)
}

func (u *userDatabase) FindNotification(ctx context.Context, level int64) (users []*relation.UserModel, err error) {
func (u *userDatabase) FindNotification(ctx context.Context, level int64) (users []*model.User, err error) {
    return u.userDB.TakeNotification(ctx, level)
}

// Create Insert multiple external guarantees that the userID is not repeated and does not exist in the db.
func (u *userDatabase) Create(ctx context.Context, users []*relation.UserModel) (err error) {
// Create Insert multiple external guarantees that the userID is not repeated and does not exist in the storage.
func (u *userDatabase) Create(ctx context.Context, users []*model.User) (err error) {
    return u.tx.Transaction(ctx, func(ctx context.Context) error {
        if err = u.userDB.Create(ctx, users); err != nil {
            return err
        }
        return u.cache.DelUsersInfo(datautil.Slice(users, func(e *relation.UserModel) string {
        return u.cache.DelUsersInfo(datautil.Slice(users, func(e *model.User) string {
            return e.UserID
        })...).ExecDel(ctx)
        })...).ChainExecDel(ctx)
    })
}

@ -161,20 +162,20 @@ func (u *userDatabase) UpdateByMap(ctx context.Context, userID string, args map[
        if err := u.userDB.UpdateByMap(ctx, userID, args); err != nil {
            return err
        }
        return u.cache.DelUsersInfo(userID).ExecDel(ctx)
        return u.cache.DelUsersInfo(userID).ChainExecDel(ctx)
    })
}

// Page Gets, returns no error if not found.
func (u *userDatabase) Page(ctx context.Context, pagination pagination.Pagination) (count int64, users []*relation.UserModel, err error) {
func (u *userDatabase) Page(ctx context.Context, pagination pagination.Pagination) (count int64, users []*model.User, err error) {
    return u.userDB.Page(ctx, pagination)
}

func (u *userDatabase) PageFindUser(ctx context.Context, level1 int64, level2 int64, pagination pagination.Pagination) (count int64, users []*relation.UserModel, err error) {
func (u *userDatabase) PageFindUser(ctx context.Context, level1 int64, level2 int64, pagination pagination.Pagination) (count int64, users []*model.User, err error) {
    return u.userDB.PageFindUser(ctx, level1, level2, pagination)
}

func (u *userDatabase) PageFindUserWithKeyword(ctx context.Context, level1 int64, level2 int64, userID, nickName string, pagination pagination.Pagination) (count int64, users []*relation.UserModel, err error) {
func (u *userDatabase) PageFindUserWithKeyword(ctx context.Context, level1 int64, level2 int64, userID, nickName string, pagination pagination.Pagination) (count int64, users []*model.User, err error) {
    return u.userDB.PageFindUserWithKeyword(ctx, level1, level2, userID, nickName, pagination)
}

@ -195,7 +196,7 @@ func (u *userDatabase) GetAllUserID(ctx context.Context, pagination pagination.P
    return u.userDB.GetAllUserID(ctx, pagination)
}

func (u *userDatabase) GetUserByID(ctx context.Context, userID string) (user *relation.UserModel, err error) {
func (u *userDatabase) GetUserByID(ctx context.Context, userID string) (user *model.User, err error) {
    return u.userDB.Take(ctx, userID)
}

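Editor's note: InitOnce above computes the set difference between the users it was asked to seed and the users already present, then inserts only the missing ones. The helpers below reproduce that logic with local generics instead of the datautil package (datautil.Slice and datautil.SliceAnySub in the diff), so the snippet stays self-contained; it illustrates the flow rather than quoting the project's code.

package main

import "fmt"

type user struct {
	UserID   string
	Nickname string
}

// sliceMap projects a slice, playing the role of datautil.Slice above.
func sliceMap[T any, V any](in []T, f func(T) V) []V {
	out := make([]V, 0, len(in))
	for _, e := range in {
		out = append(out, f(e))
	}
	return out
}

// subByKey returns the elements of want whose key is absent from have,
// mirroring the role of datautil.SliceAnySub in InitOnce.
func subByKey[T any, K comparable](want, have []T, key func(T) K) []T {
	seen := make(map[K]struct{}, len(have))
	for _, e := range have {
		seen[key(e)] = struct{}{}
	}
	var missing []T
	for _, e := range want {
		if _, ok := seen[key(e)]; !ok {
			missing = append(missing, e)
		}
	}
	return missing
}

func main() {
	want := []*user{{UserID: "admin"}, {UserID: "notification"}}
	have := []*user{{UserID: "admin"}} // already in the database
	ids := sliceMap(want, func(u *user) string { return u.UserID })
	missing := subByKey(want, have, func(u *user) string { return u.UserID })
	fmt.Println(ids, len(missing), missing[0].UserID) // [admin notification] 1 notification
}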
@ -12,32 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package relation
package database

import (
    "context"
    "time"

    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
    "github.com/openimsdk/tools/db/pagination"
)

type BlackModel struct {
    OwnerUserID string `bson:"owner_user_id"`
    BlockUserID string `bson:"block_user_id"`
    CreateTime time.Time `bson:"create_time"`
    AddSource int32 `bson:"add_source"`
    OperatorUserID string `bson:"operator_user_id"`
    Ex string `bson:"ex"`
}

type BlackModelInterface interface {
    Create(ctx context.Context, blacks []*BlackModel) (err error)
    Delete(ctx context.Context, blacks []*BlackModel) (err error)
    // UpdateByMap(ctx context.Context, ownerUserID, blockUserID string, args map[string]any) (err error)
    // Update(ctx context.Context, blacks []*BlackModel) (err error)
    Find(ctx context.Context, blacks []*BlackModel) (blackList []*BlackModel, err error)
    Take(ctx context.Context, ownerUserID, blockUserID string) (black *BlackModel, err error)
    FindOwnerBlacks(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (total int64, blacks []*BlackModel, err error)
    FindOwnerBlackInfos(ctx context.Context, ownerUserID string, userIDs []string) (blacks []*BlackModel, err error)
type Black interface {
    Create(ctx context.Context, blacks []*model.Black) (err error)
    Delete(ctx context.Context, blacks []*model.Black) (err error)
    Find(ctx context.Context, blacks []*model.Black) (blackList []*model.Black, err error)
    Take(ctx context.Context, ownerUserID, blockUserID string) (black *model.Black, err error)
    FindOwnerBlacks(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (total int64, blacks []*model.Black, err error)
    FindOwnerBlackInfos(ctx context.Context, ownerUserID string, userIDs []string) (blacks []*model.Black, err error)
    FindBlackUserIDs(ctx context.Context, ownerUserID string) (blackUserIDs []string, err error)
}