Hello Everyone,
I'm using Dell ECS as my S3 backend, and after some time I start to see this particular message for all my operations:
level=error ts=2024-12-22T04:41:59.49184793Z caller=flush.go:233 org_id=single-tenant msg="error performing op in flushQueue" op=1 block=e39622c4-3777-4c76-b659-178a5d728286 attempts=58 err="error copying block from local to remote backend: error writing object to s3 backend, object single-tenant/e39622c4-3777-4c76-b659-178a5d728286/data.parquet: Check if quota has been exceeded or object has too many versions"
level=info ts=2024-12-22T04:41:59.491907604Z caller=flush.go:391 org_id=single-tenant msg="retrying op in flushQueue" op=1 block=e39622c4-3777-4c76-b659-178a5d728286 backoff=2m0s
level=info ts=2024-12-22T04:41:59.491946293Z caller=flush.go:312 msg="flushing block" userid=single-tenant block=0014813c-fe0e-46bb-b62b-2d0fa1e44a6e
level=error ts=2024-12-22T04:41:59.574035199Z caller=flush.go:233 org_id=single-tenant msg="error performing op in flushQueue" op=1 block=65a133a1-c6f7-4096-8d9f-2c18f13cd4e2 attempts=417 err="error copying block from local to remote backend: error writing object to s3 backend, object single-tenant/65a133a1-c6f7-4096-8d9f-2c18f13cd4e2/data.parquet: Check if quota has been exceeded or object has too many versions"
level=info ts=2024-12-22T04:41:59.574077979Z caller=flush.go:391 org_id=single-tenant msg="retrying op in flushQueue" op=1 block=65a133a1-c6f7-4096-8d9f-2c18f13cd4e2 backoff=2m0s
level=info ts=2024-12-22T04:41:59.574109427Z caller=flush.go:312 msg="flushing block" userid=single-tenant block=e01c91eb-aac9-4da8-884c-eb8c117d609a
level=error ts=2024-12-22T04:41:59.680115917Z caller=flush.go:233 org_id=single-tenant msg="error performing op in flushQueue" op=1 block=45cad84c-3efa-4696-99af-160ea0b44510 attempts=219 err="error copying block from local to remote backend: error writing object to s3 backend, object single-tenant/45cad84c-3efa-4696-99af-160ea0b44510/data.parquet: Check if quota has been exceeded or object has too many versions"
level=info ts=2024-12-22T04:41:59.680169029Z caller=flush.go:391 org_id=single-tenant msg="retrying op in flushQueue" op=1 block=45cad84c-3efa-4696-99af-160ea0b44510 backoff=2m0s
level=info ts=2024-12-22T04:41:59.68021364Z caller=flush.go:312 msg="flushing block" userid=single-tenant block=2df5edb5-ed63-4a6a-bb19-9c879f9b71e3
level=error ts=2024-12-22T04:42:00.488218529Z caller=flush.go:233 org_id=single-tenant msg="error performing op in flushQueue" op=1 block=ab69dea3-49c3-4d40-aff7-721fe737be02 attempts=437 err="error copying block from local to remote backend: error writing object to s3 backend, object single-tenant/ab69dea3-49c3-4d40-aff7-721fe737be02/data.parquet: Check if quota has been exceeded or object has too many versions"
level=info ts=2024-12-22T04:42:00.488280096Z caller=flush.go:391 org_id=single-tenant msg="retrying op in flushQueue" op=1 block=ab69dea3-49c3-4d40-aff7-721fe737be02 backoff=2m0s
level=info ts=2024-12-22T04:42:00.488323419Z caller=flush.go:312 msg="flushing block" userid=single-tenant block=273b2452-3b61-4c24-bb29-efe574912469
level=error ts=2024-12-22T04:42:01.077140381Z caller=flush.go:233 org_id=single-tenant msg="error performing op in flushQueue" op=1 block=2df5edb5-ed63-4a6a-bb19-9c879f9b71e3 attempts=652 err="error copying block from local to remote backend: error writing object to s3 backend, object single-tenant/2df5edb5-ed63-4a6a-bb19-9c879f9b71e3/data.parquet: Check if quota has been exceeded or object has too many versions"
level=info ts=2024-12-22T04:42:01.07718453Z caller=flush.go:391 org_id=single-tenant msg="retrying op in flushQueue" op=1 block=2df5edb5-ed63-4a6a-bb19-9c879f9b71e3 backoff=2m0s
level=info ts=2024-12-22T04:42:01.077232484Z caller=flush.go:312 msg="flushing block" userid=single-tenant block=bdf3becb-b9bd-46f5-8aba-e5d43632c1dd
level=error ts=2024-12-22T04:42:01.078250022Z caller=flush.go:233 org_id=single-tenant msg="error performing op in flushQueue" op=1 block=e01c91eb-aac9-4da8-884c-eb8c117d609a attempts=339 err="error copying block from local to remote backend: error writing object to s3 backend, object single-tenant/e01c91eb-aac9-4da8-884c-eb8c117d609a/data.parquet: Check if quota has been exceeded or object has too many versions"
level=info ts=2024-12-22T04:42:01.078277919Z caller=flush.go:391 org_id=single-tenant msg="retrying op in flushQueue" op=1 block=e01c91eb-aac9-4da8-884c-eb8c117d609a backoff=2m0s
level=info ts=2024-12-22T04:42:01.078297462Z caller=flush.go:312 msg="flushing block" userid=single-tenant block=09c3480b-4e98-43e7-a6e3-d4bb54ba2bd7
level=error ts=2024-12-22T04:42:01.177086297Z caller=flush.go:233 org_id=single-tenant msg="error performing op in flushQueue" op=1 block=0014813c-fe0e-46bb-b62b-2d0fa1e44a6e attempts=209 err="error copying block from local to remote backend: error writing object to s3 backend, object single-tenant/0014813c-fe0e-46bb-b62b-2d0fa1e44a6e/data.parquet: Check if quota has been exceeded or object has too many versions"
level=info ts=2024-12-22T04:42:01.177146201Z caller=flush.go:391 org_id=single-tenant msg="retrying op in flushQueue" op=1 block=0014813c-fe0e-46bb-b62b-2d0fa1e44a6e backoff=2m0s
level=info ts=2024-12-22T04:42:01.177181411Z caller=flush.go:312 msg="flushing block" userid=single-tenant block=9d9751fc-ec23-44ba-af18-f8eb1b0ed141
It's worth mentioning that the bucket isn't full and the quota has not been exceeded. Once these errors start, I'm no longer able to search for traces and get this message for every search:
failed to get trace with id: ff2c36c118818af306668fabc20c2dff Status: 500 Internal Server Error Body: error finding trace by id, blockID: 67b69e1d-17df-4b96-b7fe-7f8776751f14: error retrieving bloom bloom-0 (single-tenant, 67b69e1d-17df-4b96-b7fe-7f8776751f14): does not exist
with a different blockID and bloom each time.
Logs from S3
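One way to confirm this symptom from outside Tempo is to stat the bloom object directly in the bucket. Below is a minimal Go sketch using minio-go (the S3 client library Tempo's s3 backend is built on); the endpoint, bucket name and credentials are placeholders, and the <tenant>/<blockID>/<object> key layout is only assumed from the data.parquet paths in the flush errors above.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder ECS endpoint and credentials -- replace with real values.
	client, err := minio.New("ecs.example.local:9021", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Object key assumed from the 500 error: tenant "single-tenant",
	// blockID 67b69e1d-17df-4b96-b7fe-7f8776751f14, object "bloom-0".
	key := "single-tenant/67b69e1d-17df-4b96-b7fe-7f8776751f14/bloom-0"
	info, err := client.StatObject(context.Background(), "tempo-bucket", key, minio.StatObjectOptions{})
	if err != nil {
		fmt.Println("bloom object missing or unreadable:", err)
		return
	}
	fmt.Printf("bloom object present: %d bytes\n", info.Size)
}

If the object really is absent, that would be consistent with the missing blooms being a downstream effect of the flush failures: the ingester never managed to upload the block, so the querier has nothing to read.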
Hi, you should address this to your S3 provider and review your usage/settings; it looks like you hit some preconfigured limits.
@edgarkz I did that, and there are no settings regarding usage or quota. It is also worth mentioning that this problem started when we updated from 2.4 to 2.6 and switched to vParquet4.
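Since the ECS error message blames quota or object versions, a client-side check worth doing is whether versioning is enabled on the bucket and whether the retried data.parquet uploads are piling up as versions. A minimal Go sketch with minio-go, under the same placeholder endpoint/bucket/credential assumptions as above (the block ID is taken from the ingester logs):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	ctx := context.Background()
	client, err := minio.New("ecs.example.local:9021", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Is versioning turned on at the bucket level?
	vcfg, err := client.GetBucketVersioning(ctx, "tempo-bucket")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bucket versioning status:", vcfg.Status)

	// Count the versions accumulated for one block object that keeps
	// failing to flush (block e39622c4-... from the logs above).
	prefix := "single-tenant/e39622c4-3777-4c76-b659-178a5d728286/data.parquet"
	versions := 0
	for obj := range client.ListObjects(ctx, "tempo-bucket", minio.ListObjectsOptions{
		Prefix:       prefix,
		Recursive:    true,
		WithVersions: true,
	}) {
		if obj.Err != nil {
			log.Fatal(obj.Err)
		}
		versions++
	}
	fmt.Printf("versions found for %s: %d\n", prefix, versions)
}

If the version count grows with every retry, capping or disabling versioning on the Tempo bucket (or raising the per-object version limit on the ECS side) would be the thing to discuss with the storage team.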
To Reproduce
Steps to reproduce the behavior:
Expected behavior
Able to search and write traces to S3
Environment:
Additional Context
backend: 127.0.0.1:3100
tempo.yaml:
cache:
  caches:
  - memcached:
      consistent_hash: true
      host: 'grafana-tempo-memcached'
      service: memcached-client
      timeout: 500ms
    roles:
compactor:
  compaction:
    block_retention: 168h
    compacted_block_retention: 1h
    compaction_cycle: 30s
    compaction_window: 1h
    max_block_bytes: 107374182400
    max_compaction_objects: 6000000
    max_time_per_tenant: 5m
    retention_concurrency: 10
    v2_in_buffer_bytes: 5242880
    v2_out_buffer_bytes: 20971520
    v2_prefetch_traces_count: 1000
  ring:
    kvstore:
      store: memberlist
distributor:
  receivers:
    jaeger:
      protocols:
        grpc:
          endpoint: 0.0.0.0:14250
    otlp:
      protocols:
        grpc:
          endpoint: 0.0.0.0:4317
        http:
          endpoint: 0.0.0.0:4318
  ring:
    kvstore:
      store: memberlist
ingester:
  complete_block_timeout: 10m
  flush_check_period: 5s
  lifecycler:
    ring:
      kvstore:
        store: memberlist
      replication_factor: 3
    tokens_file_path: /var/tempo/tokens.json
  max_block_duration: 5m
  trace_idle_period: 5s
memberlist:
  abort_if_cluster_join_fails: false
  bind_addr: []
  bind_port: 7946
  cluster_label: 'grafana-tempo.tempo'
  gossip_interval: 1s
  gossip_nodes: 2
  gossip_to_dead_nodes_time: 30s
  join_members:
  leave_timeout: 5s
  left_ingesters_timeout: 5m
  max_join_backoff: 1m
  max_join_retries: 10
  min_join_backoff: 1s
  node_name: ""
  packet_dial_timeout: 5s
  packet_write_timeout: 5s
  pull_push_interval: 30s
  randomize_node_name: true
  rejoin_interval: 0s
  retransmit_factor: 2
  stream_timeout: 10s
metrics_generator:
  metrics_ingestion_time_range_slack: 30s
  processor:
    service_graphs:
      dimensions: []
      histogram_buckets:
      max_items: 10000
      wait: 10s
      workers: 10
    span_metrics:
      dimensions: []
      histogram_buckets:
  registry:
    collection_interval: 15s
    external_labels: {}
    stale_duration: 15m
  ring:
    kvstore:
      store: memberlist
  storage:
    path: /var/tempo/wal
    remote_write:
    remote_write_add_org_id_header: false
    remote_write_flush_deadline: 1m
  traces_storage:
    path: /var/tempo/traces
multitenancy_enabled: false
overrides:
  max_bytes_per_trace: 50000000
  max_traces_per_user: 30000000
  metrics_generator_processors:
  per_tenant_override_config: /runtime-config/overrides.yaml
querier:
  frontend_worker:
    frontend_address: grafana-tempo-query-frontend-discovery:9095
  max_concurrent_queries: 20
  search:
    external_backend: null
    external_endpoints: []
    external_hedge_requests_at: 8s
    external_hedge_requests_up_to: 2
    prefer_self: 10
    query_timeout: 30s
  trace_by_id:
    query_timeout: 10s
query_frontend:
  max_outstanding_per_tenant: 2000
  max_retries: 2
  metrics:
    max_duration: 3h
  search:
    concurrent_jobs: 1000
    target_bytes_per_job: 104857600
  trace_by_id:
    query_shards: 100
server:
  grpc_server_max_recv_msg_size: 100000000
  grpc_server_max_send_msg_size: 100000000
  http_listen_port: 3100
  http_server_read_timeout: 30s
  http_server_write_timeout: 30s
  log_format: logfmt
  log_level: info
storage:
  trace:
    backend: s3
    block:
      parquet_dedicated_columns:
      - scope: span
        type: string
      - scope: span
        type: string
      - scope: span
        type: string
      - scope: span
        type: string
      - scope: span
        type: string
      - scope: span
        type: string
      - scope: resource
        type: string
      - scope: resource
        type: string
      version: vParquet4
    blocklist_poll: 5m
    local:
      path: /var/tempo/traces
    pool:
      max_workers: 400
      queue_depth: 20000
    s3:
      access_key: ##############
      bucket: ##############
      endpoint: ##############
      secret_key: ##############
      tls_insecure_skip_verify: true
    wal:
      path: /var/tempo/wal
usage_report:
  reporting_enabled: true