Hi Team,
I am using the latest Scala Stream Enrich (0.21.0), and I am getting the error below when I use a custom Kinesis endpoint:
Received error response: com.amazonaws.services.kinesis.model.AmazonKinesisException: Credential should be scoped to a valid region, not 'vpce' (Service: AmazonKinesis; Status Code: 400; Error Code: InvalidSignatureException)
In the debug logs I can see that the credential scope for the AWS4 canonical request comes through as 20190611/vpce/kinesis/aws4_request, i.e. the signing region is 'vpce' rather than a real region. This happens only for the enricher, not for the collector.
Please help with this.
The custom endpoint I am using is:

customEndpoint = vpce-XXXXXXXXXXX-XXXXXXXXXXX.kinesis.eu-west-1.vpce.amazonaws.com
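Looking at that host name, the scope in the logs matches the region being parsed out of the endpoint ('vpce' is the label just before amazonaws.com) rather than taken from the region setting. To illustrate what I mean, here is a minimal sketch with the AWS Java SDK v1 (not the enricher's actual code): the second argument of EndpointConfiguration pins the SigV4 signing region, which is what would give the correct credential scope.

import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.kinesis.{AmazonKinesis, AmazonKinesisClientBuilder}

object KinesisClientSketch {
  // Sketch: build a Kinesis client against the VPC endpoint while pinning the
  // SigV4 signing region explicitly, so the credential scope would be
  // <date>/eu-west-1/kinesis/aws4_request instead of <date>/vpce/kinesis/aws4_request.
  val customEndpoint =
    "https://vpce-XXXXXXXXXXX-XXXXXXXXXXX.kinesis.eu-west-1.vpce.amazonaws.com"

  val kinesis: AmazonKinesis = AmazonKinesisClientBuilder
    .standard()
    .withEndpointConfiguration(new EndpointConfiguration(customEndpoint, "eu-west-1"))
    .build()
}

My full enrich.streams configuration is below.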
enrich {
  streams {
    in {
      # Stream/topic where the raw events to be enriched are located
      raw = RawEventsStream
    }
    out {
      # Stream/topic where the events that were successfully enriched will end up
      enriched = GoodEventsStream
      # Stream/topic where the events that failed enrichment will be stored
      bad = BadEventsStream
      # How the output stream/topic will be partitioned.
      # Possible partition keys are: event_id, event_fingerprint, domain_userid, network_userid,
      # user_ipaddress, domain_sessionid, user_fingerprint.
      # Refer to https://github.com/snowplow/snowplow/wiki/canonical-event-model to know what the
      # possible partition keys correspond to.
      # Otherwise, the partition key will be a random UUID.
      # Note: NSQ does not make use of the partition key.
      partitionKey = "id"
    }
    # The configuration shown is for Kinesis; to use another source/sink, uncomment the
    # appropriate configuration and comment out the others.
    # To use stdin, comment out or remove everything in the "enrich.streams.sourceSink" section
    # except "enabled", which should be set to "stdin".
    sourceSink {
      # Sources / sinks currently supported are:
      # 'kinesis' for reading Thrift-serialized records and writing enriched and bad events to a
      #   Kinesis stream
      # 'googlepubsub' for reading / writing to a Google PubSub topic
      # 'kafka' for reading / writing to a Kafka topic
      # 'nsq' for reading / writing to an NSQ topic
      # 'stdin' for reading from stdin and writing to stdout and stderr
      enabled = kinesis
      # Region where the streams are located
      region = eu-west-1
      ## Optional endpoint URL configuration to override the AWS Kinesis endpoints;
      ## this can be used to specify local endpoints when using localstack
      customEndpoint = vpce-XXXXXXXXXXX-XXXXXXXXXXX.kinesis.eu-west-1.vpce.amazonaws.com
      # AWS credentials
      # If both are set to 'default', use the default AWS credentials provider chain.
      # If both are set to 'iam', use AWS IAM Roles to provision credentials.
      # If both are set to 'env', use the env variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
      aws {
        accessKey = default
        secretKey = default
      }
      # Maximum number of records to get from Kinesis per call to GetRecords
      maxRecords = 100
      # LATEST: most recent data.
      # TRIM_HORIZON: oldest available data.
      # "AT_TIMESTAMP": start from the record at or after the specified timestamp.
      # Note: this only affects the first run of this application on a stream.
      initialPosition = LATEST
      # Needs to be specified when initialPosition is "AT_TIMESTAMP".
      # The timestamp format needs to be "yyyy-MM-ddTHH:mm:ssZ".
      # Ex: "2017-05-17T10:00:00Z"
      # Note: the time needs to be specified in UTC.
      initialTimestamp = "{{initialTimestamp}}"
      # Minimum and maximum backoff periods, in milliseconds
      backoffPolicy {
        minBackoff: 3000
        maxBackoff: 60000
      }
      # Or Google PubSub
      #googleProjectId = my-project-id
      ## Size of the subscriber thread pool
      #threadPoolSize = 4
      ## Minimum, maximum and total backoff periods, in milliseconds,
      ## and the multiplier between two backoffs
      #backoffPolicy {
      #  minBackoff = {{enrichStreamsOutMinBackoff}}
      #  maxBackoff = {{enrichStreamsOutMaxBackoff}}
      #  totalBackoff = {{enrichStreamsOutTotalBackoff}} # must be >= 10000
      #  multiplier = {{enrichStreamsOutTotalBackoff}}
      #}

      # Or Kafka
      #brokers = "{{kafkaBrokers}}"
      ## Number of retries to perform before giving up on sending a record
      #retries = 0

      # Or NSQ
      ## Channel name for the NSQ source
      ## If more than one application is reading from the same NSQ topic at the same time,
      ## all of them must have the same channel name
      #rawChannel = "{{nsqSourceChannelName}}"
      ## Host name for nsqd
      #host = "{{nsqHost}}"
      ## TCP port for nsqd, 4150 by default
      #port = {{nsqdPort}}
      ## Host name for lookupd
      #lookupHost = "{{lookupHost}}"
      ## HTTP port for nsqlookupd, 4161 by default
      #lookupPort = {{nsqlookupdPort}}
    }
    # After enrichment, events are accumulated in a buffer before being sent to Kinesis/Kafka.
    # Note: buffering is not supported by NSQ.
    # The buffer is emptied whenever:
    # - the number of stored records reaches recordLimit, or
    # - the combined size of the stored records reaches byteLimit, or
    # - the time in milliseconds since it was last emptied exceeds timeLimit when
    #   a new event enters the buffer
    # (see the sketch after this config)
    buffer {
      byteLimit = 4800000
      recordLimit = 50
      timeLimit = 5000
    }
    # Used for the DynamoDB table that maintains stream state.
    # Used as the Kafka consumer group ID.
    # Used as the Google PubSub subscription name.
    appName = "AppName"
  }
}
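For reference, the flush rule described in the buffer comments above comes down to a predicate like the one below. This is my own sketch with illustrative names, not the enricher's implementation; I only include it to show how I understand the three limits interact.

object BufferRuleSketch {
  // The three limits mirror byteLimit, recordLimit and timeLimit from the config;
  // the case class and function names are illustrative.
  final case class BufferLimits(byteLimit: Long, recordLimit: Long, timeLimit: Long)

  def shouldFlush(limits: BufferLimits,
                  storedBytes: Long,           // combined size of buffered records
                  storedRecords: Long,         // number of buffered records
                  millisSinceLastEmpty: Long   // evaluated when a new event arrives
                 ): Boolean =
    storedRecords >= limits.recordLimit ||
      storedBytes >= limits.byteLimit ||
      millisSinceLastEmpty > limits.timeLimit
}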