@@ -53,26 +53,39 @@ func (f *route53Factory) NewClient(authConfig AuthConfig) (Interface, error) {
5353 if err != nil {
5454 return nil , err
5555 }
56- c .Route53RateLimiter = f .getRateLimiter (authConfig .AccessKey .ID )
56+
57+ var rateLimiterKey string
58+ if authConfig .AccessKey != nil {
59+ rateLimiterKey = authConfig .AccessKey .ID
60+ } else {
61+ // In practice, a single AWS role (the same roleARN) can be assumed by multiple Workload Identities.
62+ // A side effect of the rate limiter using the roleARN as key is that all Workload Identities assuming the same
63+ // roleARN will be throttled together.
64+ // However, on the server side (AWS STS) they would most probably be throttled on the roleARN anyway,
65+ // as this is the identity authenticated with AWS.
66+ rateLimiterKey = authConfig .WorkloadIdentity .RoleARN
67+ }
68+
69+ c .Route53RateLimiter = f .getRateLimiter (rateLimiterKey )
5770 c .Route53RateLimiterWaitTimeout = f .waitTimeout
5871 return c , nil
5972}
6073
61- func (f * route53Factory ) getRateLimiter (accessKeyID string ) * rate.Limiter {
74+ func (f * route53Factory ) getRateLimiter (rateLimiterKey string ) * rate.Limiter {
6275 // cache.Expiring Get and Set methods are concurrency-safe
6376 // However, if f rate limiter is not present in the cache, it may happen that multiple rate limiters are created
64- // at the same time for the same access key id , and the desired QPS is exceeded, so use f mutex to guard against this
77+ // at the same time for the same rate limiter key , and the desired QPS is exceeded, so use f mutex to guard against this
6578 f .rateLimitersMutex .Lock ()
6679 defer f .rateLimitersMutex .Unlock ()
6780
6881 // Get f rate limiter from the cache, or create f new one if not present
6982 var rateLimiter * rate.Limiter
70- if v , ok := f .rateLimiters .Get (accessKeyID ); ok {
83+ if v , ok := f .rateLimiters .Get (rateLimiterKey ); ok {
7184 rateLimiter = v .(* rate.Limiter )
7285 } else {
7386 rateLimiter = rate .NewLimiter (f .limit , f .burst )
7487 }
7588 // Set should be called on every Get with cache.Expiring to refresh the TTL
76- f .rateLimiters .Set (accessKeyID , rateLimiter , route53RateLimiterCacheTTL )
89+ f .rateLimiters .Set (rateLimiterKey , rateLimiter , route53RateLimiterCacheTTL )
7790 return rateLimiter
7891}
0 commit comments