axios+telemetry cleanup

@@ -1,38 +0,0 @@
/**
 * Shared analytics configuration
 *
 * Common logic for determining when analytics should be disabled
 * across all analytics systems (Datadog, 1P)
 */

import { isEnvTruthy } from '../../utils/envUtils.js'
import { isTelemetryDisabled } from '../../utils/privacyLevel.js'

/**
 * Check if analytics operations should be disabled
 *
 * Analytics is disabled in the following cases:
 * - Test environment (NODE_ENV === 'test')
 * - Third-party cloud providers (Bedrock/Vertex/Foundry)
 * - Privacy level is no-telemetry or essential-traffic
 */
export function isAnalyticsDisabled(): boolean {
  return (
    process.env.NODE_ENV === 'test' ||
    isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) ||
    isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) ||
    isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY) ||
    isTelemetryDisabled()
  )
}

/**
 * Check if the feedback survey should be suppressed.
 *
 * Unlike isAnalyticsDisabled(), this does NOT block on 3P providers
 * (Bedrock/Vertex/Foundry). The survey is a local UI prompt with no
 * transcript data — enterprise customers capture responses via OTEL.
 */
export function isFeedbackSurveyDisabled(): boolean {
  return process.env.NODE_ENV === 'test' || isTelemetryDisabled()
}
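
// Illustrative only: a minimal sketch of how call sites are expected to use
// these guards. The wrapper name `safeLogEvent` is hypothetical, not part of
// this diff; routing to the concrete Datadog/1P sinks lives elsewhere.
function safeLogEvent(
  eventName: string,
  properties: { [key: string]: boolean | number | undefined } = {},
): void {
  if (isAnalyticsDisabled()) {
    return // opted out: drop the event with no network or disk activity
  }
  console.debug('would log', eventName, properties) // stand-in for real sinks
}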

@@ -1,307 +0,0 @@
import axios from 'axios'
import { createHash } from 'crypto'
import memoize from 'lodash-es/memoize.js'
import { getOrCreateUserID } from '../../utils/config.js'
import { logError } from '../../utils/log.js'
import { getCanonicalName } from '../../utils/model/model.js'
import { getAPIProvider } from '../../utils/model/providers.js'
import { MODEL_COSTS } from '../../utils/modelCost.js'
import { isAnalyticsDisabled } from './config.js'
import { getEventMetadata } from './metadata.js'

const DATADOG_LOGS_ENDPOINT =
  'https://http-intake.logs.us5.datadoghq.com/api/v2/logs'
const DATADOG_CLIENT_TOKEN = 'pubbbf48e6d78dae54bceaa4acf463299bf'
const DEFAULT_FLUSH_INTERVAL_MS = 15000
const MAX_BATCH_SIZE = 100
const NETWORK_TIMEOUT_MS = 5000

const DATADOG_ALLOWED_EVENTS = new Set([
  'chrome_bridge_connection_succeeded',
  'chrome_bridge_connection_failed',
  'chrome_bridge_disconnected',
  'chrome_bridge_tool_call_completed',
  'chrome_bridge_tool_call_error',
  'chrome_bridge_tool_call_started',
  'chrome_bridge_tool_call_timeout',
  'tengu_api_error',
  'tengu_api_success',
  'tengu_brief_mode_enabled',
  'tengu_brief_mode_toggled',
  'tengu_brief_send',
  'tengu_cancel',
  'tengu_compact_failed',
  'tengu_exit',
  'tengu_flicker',
  'tengu_init',
  'tengu_model_fallback_triggered',
  'tengu_oauth_error',
  'tengu_oauth_success',
  'tengu_oauth_token_refresh_failure',
  'tengu_oauth_token_refresh_success',
  'tengu_oauth_token_refresh_lock_acquiring',
  'tengu_oauth_token_refresh_lock_acquired',
  'tengu_oauth_token_refresh_starting',
  'tengu_oauth_token_refresh_completed',
  'tengu_oauth_token_refresh_lock_releasing',
  'tengu_oauth_token_refresh_lock_released',
  'tengu_query_error',
  'tengu_session_file_read',
  'tengu_started',
  'tengu_tool_use_error',
  'tengu_tool_use_granted_in_prompt_permanent',
  'tengu_tool_use_granted_in_prompt_temporary',
  'tengu_tool_use_rejected_in_prompt',
  'tengu_tool_use_success',
  'tengu_uncaught_exception',
  'tengu_unhandled_rejection',
  'tengu_voice_recording_started',
  'tengu_voice_toggled',
  'tengu_team_mem_sync_pull',
  'tengu_team_mem_sync_push',
  'tengu_team_mem_sync_started',
  'tengu_team_mem_entries_capped',
])

const TAG_FIELDS = [
  'arch',
  'clientType',
  'errorType',
  'http_status_range',
  'http_status',
  'kairosActive',
  'model',
  'platform',
  'provider',
  'skillMode',
  'subscriptionType',
  'toolName',
  'userBucket',
  'userType',
  'version',
  'versionBase',
]

function camelToSnakeCase(str: string): string {
  return str.replace(/[A-Z]/g, letter => `_${letter.toLowerCase()}`)
}
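
// Examples: 'subscriptionType' -> 'subscription_type', 'versionBase' ->
// 'version_base'. Keys that are already snake_case, such as
// 'http_status_range', pass through unchanged since they contain no
// uppercase letters.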

type DatadogLog = {
  ddsource: string
  ddtags: string
  message: string
  service: string
  hostname: string
  [key: string]: unknown
}

let logBatch: DatadogLog[] = []
let flushTimer: NodeJS.Timeout | null = null
let datadogInitialized: boolean | null = null

async function flushLogs(): Promise<void> {
  if (logBatch.length === 0) return

  const logsToSend = logBatch
  logBatch = []

  try {
    await axios.post(DATADOG_LOGS_ENDPOINT, logsToSend, {
      headers: {
        'Content-Type': 'application/json',
        'DD-API-KEY': DATADOG_CLIENT_TOKEN,
      },
      timeout: NETWORK_TIMEOUT_MS,
    })
  } catch (error) {
    logError(error)
  }
}

function scheduleFlush(): void {
  if (flushTimer) return

  flushTimer = setTimeout(() => {
    flushTimer = null
    void flushLogs()
  }, getFlushIntervalMs()).unref()
}

export const initializeDatadog = memoize(async (): Promise<boolean> => {
  if (isAnalyticsDisabled()) {
    datadogInitialized = false
    return false
  }

  try {
    datadogInitialized = true
    return true
  } catch (error) {
    logError(error)
    datadogInitialized = false
    return false
  }
})

/**
 * Flush remaining Datadog logs and shut down.
 * Called from gracefulShutdown() before process.exit() since
 * forceExit() prevents the beforeExit handler from firing.
 */
export async function shutdownDatadog(): Promise<void> {
  if (flushTimer) {
    clearTimeout(flushTimer)
    flushTimer = null
  }
  await flushLogs()
}

// NOTE: use via src/services/analytics/index.ts > logEvent
export async function trackDatadogEvent(
  eventName: string,
  properties: { [key: string]: boolean | number | undefined },
): Promise<void> {
  if (process.env.NODE_ENV !== 'production') {
    return
  }

  // Don't send events for 3P providers (Bedrock, Vertex, Foundry)
  if (getAPIProvider() !== 'firstParty') {
    return
  }

  // Fast path: use cached result if available to avoid await overhead
  let initialized = datadogInitialized
  if (initialized === null) {
    initialized = await initializeDatadog()
  }
  if (!initialized || !DATADOG_ALLOWED_EVENTS.has(eventName)) {
    return
  }

  try {
    const metadata = await getEventMetadata({
      model: properties.model,
      betas: properties.betas,
    })
    // Destructure to avoid duplicate envContext (once nested, once flattened)
    const { envContext, ...restMetadata } = metadata
    const allData: Record<string, unknown> = {
      ...restMetadata,
      ...envContext,
      ...properties,
      userBucket: getUserBucket(),
    }

    // Normalize MCP tool names to "mcp" for cardinality reduction
    if (
      typeof allData.toolName === 'string' &&
      allData.toolName.startsWith('mcp__')
    ) {
      allData.toolName = 'mcp'
    }

    // Normalize model names for cardinality reduction (external users only)
    if (process.env.USER_TYPE !== 'ant' && typeof allData.model === 'string') {
      const shortName = getCanonicalName(allData.model.replace(/\[1m]$/i, ''))
      allData.model = shortName in MODEL_COSTS ? shortName : 'other'
    }

    // Truncate dev version to base + date (remove timestamp and sha for cardinality reduction)
    // e.g. "2.0.53-dev.20251124.t173302.sha526cc6a" -> "2.0.53-dev.20251124"
    if (typeof allData.version === 'string') {
      allData.version = allData.version.replace(
        /^(\d+\.\d+\.\d+-dev\.\d{8})\.t\d+\.sha[a-f0-9]+$/,
        '$1',
      )
    }

    // Transform status to http_status and http_status_range to avoid Datadog reserved field
    if (allData.status !== undefined && allData.status !== null) {
      const statusCode = String(allData.status)
      allData.http_status = statusCode

      // Determine status range (1xx, 2xx, 3xx, 4xx, 5xx)
      const firstDigit = statusCode.charAt(0)
      if (firstDigit >= '1' && firstDigit <= '5') {
        allData.http_status_range = `${firstDigit}xx`
      }

      // Remove original status field to avoid conflict with Datadog's reserved field
      delete allData.status
    }

    // Build ddtags with high-cardinality fields for filtering.
    // event:<name> is prepended so the event name is searchable via the
    // log search API — the `message` field (where eventName also lives)
    // is a DD reserved field and is NOT queryable from dashboard widget
    // queries or the aggregation API. See scripts/release/MONITORING.md.
    const allDataRecord = allData
    const tags = [
      `event:${eventName}`,
      ...TAG_FIELDS.filter(
        field =>
          allDataRecord[field] !== undefined && allDataRecord[field] !== null,
      ).map(field => `${camelToSnakeCase(field)}:${allDataRecord[field]}`),
    ]
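
    // For illustration (values hypothetical), the resulting string looks like:
    //   'event:tengu_api_error,model:claude-x,http_status:500,http_status_range:5xx'
    // Note that keys are snake_cased while values are passed through as-is.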

    const log: DatadogLog = {
      ddsource: 'nodejs',
      ddtags: tags.join(','),
      message: eventName,
      service: 'claude-code',
      hostname: 'claude-code',
      env: process.env.USER_TYPE,
    }

    // Add all fields as searchable attributes (not duplicated in tags)
    for (const [key, value] of Object.entries(allData)) {
      if (value !== undefined && value !== null) {
        log[camelToSnakeCase(key)] = value
      }
    }

    logBatch.push(log)

    // Flush immediately if batch is full, otherwise schedule
    if (logBatch.length >= MAX_BATCH_SIZE) {
      if (flushTimer) {
        clearTimeout(flushTimer)
        flushTimer = null
      }
      void flushLogs()
    } else {
      scheduleFlush()
    }
  } catch (error) {
    logError(error)
  }
}

const NUM_USER_BUCKETS = 30

/**
 * Gets a 'bucket' that the user ID falls into.
 *
 * For alerting purposes, we want to alert on the number of users impacted
 * by an issue rather than the number of events; often a small number of users
 * can generate a large number of events (e.g. due to retries). To approximate
 * this without ruining cardinality by counting user IDs directly, we hash the
 * user ID and assign it to one of a fixed number of buckets.
 *
 * This allows us to estimate the number of unique users by counting unique
 * buckets, while preserving user privacy and reducing cardinality.
 */
const getUserBucket = memoize((): number => {
  const userId = getOrCreateUserID()
  const hash = createHash('sha256').update(userId).digest('hex')
  return parseInt(hash.slice(0, 8), 16) % NUM_USER_BUCKETS
})
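
// Sketch of the arithmetic: the first 8 hex chars of the SHA-256 digest form
// a 32-bit integer, so the mod spreads users roughly uniformly over buckets
// 0..29. Observing k distinct userBucket values in Datadog then implies at
// least k distinct users, without ever logging the user ID itself.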

function getFlushIntervalMs(): number {
  // Allow tests to override to not block on the default flush interval.
  return (
    parseInt(process.env.CLAUDE_CODE_DATADOG_FLUSH_INTERVAL_MS || '', 10) ||
    DEFAULT_FLUSH_INTERVAL_MS
  )
}
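
// For example, a test might set CLAUDE_CODE_DATADOG_FLUSH_INTERVAL_MS=50 to
// make flushes near-immediate. Unset or non-numeric values fall back to the
// 15s default because parseInt('') is NaN, which is falsy.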

@@ -1,449 +1,33 @@
import type { AnyValueMap, Logger, logs } from '@opentelemetry/api-logs'
import { resourceFromAttributes } from '@opentelemetry/resources'
import {
  BatchLogRecordProcessor,
  LoggerProvider,
} from '@opentelemetry/sdk-logs'
import {
  ATTR_SERVICE_NAME,
  ATTR_SERVICE_VERSION,
} from '@opentelemetry/semantic-conventions'
import { randomUUID } from 'crypto'
import { isEqual } from 'lodash-es'
import { getOrCreateUserID } from '../../utils/config.js'
import { logForDebugging } from '../../utils/debug.js'
import { logError } from '../../utils/log.js'
import { getPlatform, getWslVersion } from '../../utils/platform.js'
import { jsonStringify } from '../../utils/slowOperations.js'
import { profileCheckpoint } from '../../utils/startupProfiler.js'
import { getCoreUserData } from '../../utils/user.js'
import { isAnalyticsDisabled } from './config.js'
import { FirstPartyEventLoggingExporter } from './firstPartyEventLoggingExporter.js'
import type { GrowthBookUserAttributes } from './growthbook.js'
import { getDynamicConfig_CACHED_MAY_BE_STALE } from './growthbook.js'
import { getEventMetadata } from './metadata.js'
import { isSinkKilled } from './sinkKillswitch.js'

/**
 * Configuration for sampling individual event types.
 * Each event name maps to an object containing sample_rate (0-1).
 * Events not in the config are logged at 100% rate.
 */
export type EventSamplingConfig = {
  [eventName: string]: {
    sample_rate: number
  }
}
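
// Example shape (rates hypothetical):
//   { "tengu_api_success": { "sample_rate": 0.1 },
//     "tengu_flicker": { "sample_rate": 0 } }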

const EVENT_SAMPLING_CONFIG_NAME = 'tengu_event_sampling_config'
/**
 * Get the event sampling configuration from GrowthBook.
 * Uses cached value if available, updates cache in background.
 */
export function getEventSamplingConfig(): EventSamplingConfig {
  return getDynamicConfig_CACHED_MAY_BE_STALE<EventSamplingConfig>(
    EVENT_SAMPLING_CONFIG_NAME,
    {},
  )
}

/**
 * First Party Event Logger - STUB
 *
 * This module has been stubbed out as part of the telemetry purge.
 * It no longer has any dependencies on @opentelemetry or other analytics packages.
 */

/**
 * Determine if an event should be sampled based on its sample rate.
 *
 * @param eventName - Name of the event to check
 * @returns The sample_rate if the event was sampled in, 0 if it should be
 *          dropped, or null if no sampling applies (log at 100%)
 */
export function shouldSampleEvent(eventName: string): number | null {
  const config = getEventSamplingConfig()
  const eventConfig = config[eventName]

  // If no config for this event, log at 100% rate (no sampling)
  if (!eventConfig) {
    return null
  }

  const sampleRate = eventConfig.sample_rate

  // Validate sample rate is in valid range
  if (typeof sampleRate !== 'number' || sampleRate < 0 || sampleRate > 1) {
    return null
  }

  // Sample rate of 1 means log everything (no need to add metadata)
  if (sampleRate >= 1) {
    return null
  }

  // Sample rate of 0 means drop everything
  if (sampleRate <= 0) {
    return 0
  }

  // Randomly decide whether to sample this event
  return Math.random() < sampleRate ? sampleRate : 0
}
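
// Illustrative caller (the helper below is hypothetical, not part of this
// diff): the returned rate is attached to the event so downstream aggregation
// can re-weight counts by 1/sample_rate.
function sampleAwareLog(eventName: string): void {
  const rate = shouldSampleEvent(eventName)
  if (rate === 0) return // sampled out: drop the event entirely
  logEventTo1P(eventName, rate === null ? {} : { sample_rate: rate })
}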

const BATCH_CONFIG_NAME = 'tengu_1p_event_batch_config'
type BatchConfig = {
  scheduledDelayMillis?: number
  maxExportBatchSize?: number
  maxQueueSize?: number
  skipAuth?: boolean
  maxAttempts?: number
  path?: string
  baseUrl?: string
}
function getBatchConfig(): BatchConfig {
  return getDynamicConfig_CACHED_MAY_BE_STALE<BatchConfig>(
    BATCH_CONFIG_NAME,
    {},
  )
}

// Module-local state for event logging (not exposed globally)
let firstPartyEventLogger: ReturnType<typeof logs.getLogger> | null = null
let firstPartyEventLoggerProvider: LoggerProvider | null = null
// Last batch config used to construct the provider — used by
// reinitialize1PEventLoggingIfConfigChanged to decide whether a rebuild is
// needed when GrowthBook refreshes.
let lastBatchConfig: BatchConfig | null = null
/**
 * Flush and shutdown the 1P event logger.
 * This should be called as the final step before process exit to ensure
 * all events (including late ones from API responses) are exported.
 */
export async function shutdown1PEventLogging(): Promise<void> {
  if (!firstPartyEventLoggerProvider) {
    return
  }
  try {
    await firstPartyEventLoggerProvider.shutdown()
    if (process.env.USER_TYPE === 'ant') {
      logForDebugging('1P event logging: final shutdown complete')
    }
  } catch {
    // Ignore shutdown errors
  }
}

/**
 * Check if 1P event logging is enabled.
 * Respects the same opt-outs as other analytics sinks:
 * - Test environment
 * - Third-party cloud providers (Bedrock/Vertex)
 * - Global telemetry opt-outs
 * - Non-essential traffic disabled
 *
 * Note: Unlike BigQuery metrics, event logging does NOT check organization-level
 * metrics opt-out via API. It follows the same pattern as Statsig event logging.
 */
export function is1PEventLoggingEnabled(): boolean {
  // Respect standard analytics opt-outs
  return !isAnalyticsDisabled()
}

/**
 * Log a 1st-party event for internal analytics (async version).
 * Events are batched and exported to /api/event_logging/batch
 *
 * This enriches the event with core metadata (model, session, env context, etc.)
 * at log time, similar to logEventToStatsig.
 *
 * @param eventName - Name of the event (e.g., 'tengu_api_query')
 * @param metadata - Additional metadata for the event (intentionally no strings, to avoid accidentally logging code/filepaths)
 */
async function logEventTo1PAsync(
  firstPartyEventLogger: Logger,
  eventName: string,
  metadata: Record<string, number | boolean | undefined> = {},
): Promise<void> {
  try {
    // Enrich with core metadata at log time (similar to Statsig pattern)
    const coreMetadata = await getEventMetadata({
      model: metadata.model,
      betas: metadata.betas,
    })

    // Build attributes - OTel supports nested objects natively via AnyValueMap
    // Cast through unknown since our nested objects are structurally compatible
    // with AnyValue but TS doesn't recognize it due to missing index signatures
    const attributes = {
      event_name: eventName,
      event_id: randomUUID(),
      // Pass objects directly - no JSON serialization needed
      core_metadata: coreMetadata,
      user_metadata: getCoreUserData(true),
      event_metadata: metadata,
    } as unknown as AnyValueMap

    // Add user_id if available
    const userId = getOrCreateUserID()
    if (userId) {
      attributes.user_id = userId
    }

    // Debug logging when debug mode is enabled
    if (process.env.USER_TYPE === 'ant') {
      logForDebugging(
        `[ANT-ONLY] 1P event: ${eventName} ${jsonStringify(metadata, null, 0)}`,
      )
    }

    // Emit log record
    firstPartyEventLogger.emit({
      body: eventName,
      attributes,
    })
  } catch (e) {
    if (process.env.NODE_ENV === 'development') {
      throw e
    }
    if (process.env.USER_TYPE === 'ant') {
      logError(e as Error)
    }
    // swallow
  }
}

/**
 * Log a 1st-party event for internal analytics.
 * Events are batched and exported to /api/event_logging/batch
 *
 * @param eventName - Name of the event (e.g., 'tengu_api_query')
 * @param metadata - Additional metadata for the event (intentionally no strings, to avoid accidentally logging code/filepaths)
 */
export function logEventTo1P(
  eventName: string,
  metadata: Record<string, number | boolean | undefined> = {},
): void {
  if (!is1PEventLoggingEnabled()) {
    return
  }

  if (!firstPartyEventLogger || isSinkKilled('firstParty')) {
    return
  }

  // Fire and forget - don't block on metadata enrichment
  void logEventTo1PAsync(firstPartyEventLogger, eventName, metadata)
}
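
// Example call (property names hypothetical): values are numbers/booleans
// only, per the metadata type, which keeps strings such as code or file paths
// out of the payload by construction.
//   logEventTo1P('tengu_api_query', { durationMs: 1234, streaming: true })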

/**
 * GrowthBook experiment event data for logging
 */
export type GrowthBookExperimentData = {
  experimentId: string
  variationId: number
  userAttributes?: GrowthBookUserAttributes
  experimentMetadata?: Record<string, unknown>
}

// api.anthropic.com only serves the "production" GrowthBook environment
// (see starling/starling/cli/cli.py DEFAULT_ENVIRONMENTS). Staging and
// development environments are not exported to the prod API.
function getEnvironmentForGrowthBook(): string {
  return 'production'
}

/**
 * Log a GrowthBook experiment assignment event to 1P.
 * Events are batched and exported to /api/event_logging/batch
 *
 * @param data - GrowthBook experiment assignment data
 */
export function logGrowthBookExperimentTo1P(
  data: GrowthBookExperimentData,
): void {
  if (!is1PEventLoggingEnabled()) {
    return
  }

  if (!firstPartyEventLogger || isSinkKilled('firstParty')) {
    return
  }

  const userId = getOrCreateUserID()
  const { accountUuid, organizationUuid } = getCoreUserData(true)

  // Build attributes for GrowthbookExperimentEvent
  const attributes = {
    event_type: 'GrowthbookExperimentEvent',
    event_id: randomUUID(),
    experiment_id: data.experimentId,
    variation_id: data.variationId,
    ...(userId && { device_id: userId }),
    ...(accountUuid && { account_uuid: accountUuid }),
    ...(organizationUuid && { organization_uuid: organizationUuid }),
    ...(data.userAttributes && {
      session_id: data.userAttributes.sessionId,
      user_attributes: jsonStringify(data.userAttributes),
    }),
    ...(data.experimentMetadata && {
      experiment_metadata: jsonStringify(data.experimentMetadata),
    }),
    environment: getEnvironmentForGrowthBook(),
  }

  if (process.env.USER_TYPE === 'ant') {
    logForDebugging(
      `[ANT-ONLY] 1P GrowthBook experiment: ${data.experimentId} variation=${data.variationId}`,
    )
  }

  firstPartyEventLogger.emit({
    body: 'growthbook_experiment',
    attributes,
  })
}

const DEFAULT_LOGS_EXPORT_INTERVAL_MS = 10000
const DEFAULT_MAX_EXPORT_BATCH_SIZE = 200
const DEFAULT_MAX_QUEUE_SIZE = 8192

/**
 * Initialize 1P event logging infrastructure.
 * This creates a separate LoggerProvider for internal event logging,
 * independent of customer OTLP telemetry.
 *
 * This uses its own minimal resource configuration with just the attributes
 * we need for internal analytics (service name, version, platform info).
 */
export function initialize1PEventLogging(): void {
  profileCheckpoint('1p_event_logging_start')
  const enabled = is1PEventLoggingEnabled()

  if (!enabled) {
    if (process.env.USER_TYPE === 'ant') {
      logForDebugging('1P event logging not enabled')
    }
    return
  }

  // Fetch batch processor configuration from GrowthBook dynamic config
  // Uses cached value if available, refreshes in background
  const batchConfig = getBatchConfig()
  lastBatchConfig = batchConfig
  profileCheckpoint('1p_event_after_growthbook_config')

  const scheduledDelayMillis =
    batchConfig.scheduledDelayMillis ||
    parseInt(
      process.env.OTEL_LOGS_EXPORT_INTERVAL ||
        DEFAULT_LOGS_EXPORT_INTERVAL_MS.toString(),
    )

  const maxExportBatchSize =
    batchConfig.maxExportBatchSize || DEFAULT_MAX_EXPORT_BATCH_SIZE

  const maxQueueSize = batchConfig.maxQueueSize || DEFAULT_MAX_QUEUE_SIZE

  // Build our own resource for 1P event logging with minimal attributes
  const platform = getPlatform()
  const attributes: Record<string, string> = {
    [ATTR_SERVICE_NAME]: 'claude-code',
    [ATTR_SERVICE_VERSION]: MACRO.VERSION,
  }

  // Add WSL-specific attributes if running on WSL
  if (platform === 'wsl') {
    const wslVersion = getWslVersion()
    if (wslVersion) {
      attributes['wsl.version'] = wslVersion
    }
  }

  const resource = resourceFromAttributes(attributes)

  // Create a new LoggerProvider with the EventLoggingExporter
  // NOTE: This is kept separate from customer telemetry logs to ensure
  // internal events don't leak to customer endpoints and vice versa.
  // We don't register this globally - it's only used for internal event logging.
  const eventLoggingExporter = new FirstPartyEventLoggingExporter({
    maxBatchSize: maxExportBatchSize,
    skipAuth: batchConfig.skipAuth,
    maxAttempts: batchConfig.maxAttempts,
    path: batchConfig.path,
    baseUrl: batchConfig.baseUrl,
    isKilled: () => isSinkKilled('firstParty'),
  })
  firstPartyEventLoggerProvider = new LoggerProvider({
    resource,
    processors: [
      new BatchLogRecordProcessor(eventLoggingExporter, {
        scheduledDelayMillis,
        maxExportBatchSize,
        maxQueueSize,
      }),
    ],
  })

  // Initialize event logger from our internal provider (NOT from global API)
  // IMPORTANT: We must get the logger from our local provider, not logs.getLogger()
  // because logs.getLogger() returns a logger from the global provider, which is
  // separate and used for customer telemetry.
  firstPartyEventLogger = firstPartyEventLoggerProvider.getLogger(
    'com.anthropic.claude_code.events',
    MACRO.VERSION,
  )
  // No-op
}

export function logEventTo1P(
  _eventName: string,
  _metadata: Record<string, number | boolean | undefined> = {},
): void {
  // No-op
}

export function logGrowthBookExperimentTo1P(_data: unknown): void {
  // No-op
}

export async function shutdown1PEventLogging(): Promise<void> {
  // No-op
}

/**
 * Rebuild the 1P event logging pipeline if the batch config changed.
 * Register this with onGrowthBookRefresh so long-running sessions pick up
 * changes to batch size, delay, endpoint, etc.
 *
 * Event-loss safety:
 * 1. Null the logger first — concurrent logEventTo1P() calls hit the
 *    !firstPartyEventLogger guard and bail during the swap window. This drops
 *    a handful of events but prevents emitting to a draining provider.
 * 2. forceFlush() drains the old BatchLogRecordProcessor buffer to the
 *    exporter. Export failures go to disk at getCurrentBatchFilePath() which
 *    is keyed by module-level BATCH_UUID + sessionId — unchanged across
 *    reinit — so the NEW exporter's disk-backed retry picks them up.
 * 3. Swap to new provider/logger; old provider shutdown runs in background
 *    (buffer already drained, just cleanup).
 */
export async function reinitialize1PEventLoggingIfConfigChanged(): Promise<void> {
  if (!is1PEventLoggingEnabled() || !firstPartyEventLoggerProvider) {
    return
  }

  const newConfig = getBatchConfig()

  if (isEqual(newConfig, lastBatchConfig)) {
    return
  }

  if (process.env.USER_TYPE === 'ant') {
    logForDebugging(
      `1P event logging: ${BATCH_CONFIG_NAME} changed, reinitializing`,
    )
  }

  const oldProvider = firstPartyEventLoggerProvider
  const oldLogger = firstPartyEventLogger
  firstPartyEventLogger = null

  try {
    await oldProvider.forceFlush()
  } catch {
    // Export failures are already on disk; new exporter will retry them.
  }

  firstPartyEventLoggerProvider = null
  try {
    initialize1PEventLogging()
  } catch (e) {
    // Restore so the next GrowthBook refresh can retry. oldProvider was
    // only forceFlush()'d, not shut down — it's still functional. Without
    // this, both stay null and the !firstPartyEventLoggerProvider gate at
    // the top makes recovery impossible.
    firstPartyEventLoggerProvider = oldProvider
    firstPartyEventLogger = oldLogger
    logError(e)
    return
  }

  void oldProvider.shutdown().catch(() => {})
  // No-op
}

export function is1PEventLoggingEnabled(): boolean {
  return false
}

@@ -1,806 +0,0 @@
import type { HrTime } from '@opentelemetry/api'
import { type ExportResult, ExportResultCode } from '@opentelemetry/core'
import type {
  LogRecordExporter,
  ReadableLogRecord,
} from '@opentelemetry/sdk-logs'
import axios from 'axios'
import { randomUUID } from 'crypto'
import { appendFile, mkdir, readdir, unlink, writeFile } from 'fs/promises'
import * as path from 'path'
import type { CoreUserData } from 'src/utils/user.js'
import {
  getIsNonInteractiveSession,
  getSessionId,
} from '../../bootstrap/state.js'
import { ClaudeCodeInternalEvent } from '../../types/generated/events_mono/claude_code/v1/claude_code_internal_event.js'
import { GrowthbookExperimentEvent } from '../../types/generated/events_mono/growthbook/v1/growthbook_experiment_event.js'
import {
  getClaudeAIOAuthTokens,
  hasProfileScope,
  isClaudeAISubscriber,
} from '../../utils/auth.js'
import { checkHasTrustDialogAccepted } from '../../utils/config.js'
import { logForDebugging } from '../../utils/debug.js'
import { getClaudeConfigHomeDir } from '../../utils/envUtils.js'
import { errorMessage, isFsInaccessible, toError } from '../../utils/errors.js'
import { getAuthHeaders } from '../../utils/http.js'
import { readJSONLFile } from '../../utils/json.js'
import { logError } from '../../utils/log.js'
import { sleep } from '../../utils/sleep.js'
import { jsonStringify } from '../../utils/slowOperations.js'
import { getClaudeCodeUserAgent } from '../../utils/userAgent.js'
import { isOAuthTokenExpired } from '../oauth/client.js'
import { stripProtoFields } from './index.js'
import { type EventMetadata, to1PEventFormat } from './metadata.js'

// Unique ID for this process run - used to isolate failed event files between runs
const BATCH_UUID = randomUUID()

// File prefix for failed event storage
const FILE_PREFIX = '1p_failed_events.'

// Storage directory for failed events - evaluated at runtime to respect CLAUDE_CONFIG_DIR in tests
function getStorageDir(): string {
  return path.join(getClaudeConfigHomeDir(), 'telemetry')
}

// API envelope - event_data is the JSON output from proto toJSON()
type FirstPartyEventLoggingEvent = {
  event_type: 'ClaudeCodeInternalEvent' | 'GrowthbookExperimentEvent'
  event_data: unknown
}

type FirstPartyEventLoggingPayload = {
  events: FirstPartyEventLoggingEvent[]
}

/**
 * Exporter for 1st-party event logging to /api/event_logging/batch.
 *
 * Export cycles are controlled by OpenTelemetry's BatchLogRecordProcessor, which
 * triggers export() when either:
 * - Time interval elapses (default: 5 seconds via scheduledDelayMillis)
 * - Batch size is reached (default: 200 events via maxExportBatchSize)
 *
 * This exporter adds resilience on top:
 * - Append-only log for failed events (concurrency-safe)
 * - Quadratic backoff retry for failed events, dropped after maxAttempts
 * - Immediate retry of queued events when any export succeeds (endpoint is healthy)
 * - Chunking large event sets into smaller batches
 * - Auth fallback: retries without auth on 401 errors
 */
export class FirstPartyEventLoggingExporter implements LogRecordExporter {
  private readonly endpoint: string
  private readonly timeout: number
  private readonly maxBatchSize: number
  private readonly skipAuth: boolean
  private readonly batchDelayMs: number
  private readonly baseBackoffDelayMs: number
  private readonly maxBackoffDelayMs: number
  private readonly maxAttempts: number
  private readonly isKilled: () => boolean
  private pendingExports: Promise<void>[] = []
  private isShutdown = false
  private readonly schedule: (
    fn: () => Promise<void>,
    delayMs: number,
  ) => () => void
  private cancelBackoff: (() => void) | null = null
  private attempts = 0
  private isRetrying = false
  private lastExportErrorContext: string | undefined

  constructor(
    options: {
      timeout?: number
      maxBatchSize?: number
      skipAuth?: boolean
      batchDelayMs?: number
      baseBackoffDelayMs?: number
      maxBackoffDelayMs?: number
      maxAttempts?: number
      path?: string
      baseUrl?: string
      // Injected killswitch probe. Checked per-POST so that disabling the
      // firstParty sink also stops backoff retries (not just new emits).
      // Passed in rather than imported to avoid a cycle with firstPartyEventLogger.ts.
      isKilled?: () => boolean
      schedule?: (fn: () => Promise<void>, delayMs: number) => () => void
    } = {},
  ) {
    // Default: prod, except when ANTHROPIC_BASE_URL is explicitly staging.
    // Overridable via tengu_1p_event_batch_config.baseUrl.
    const baseUrl =
      options.baseUrl ||
      (process.env.ANTHROPIC_BASE_URL === 'https://api-staging.anthropic.com'
        ? 'https://api-staging.anthropic.com'
        : 'https://api.anthropic.com')

    this.endpoint = `${baseUrl}${options.path || '/api/event_logging/batch'}`

    this.timeout = options.timeout || 10000
    this.maxBatchSize = options.maxBatchSize || 200
    this.skipAuth = options.skipAuth ?? false
    this.batchDelayMs = options.batchDelayMs || 100
    this.baseBackoffDelayMs = options.baseBackoffDelayMs || 500
    this.maxBackoffDelayMs = options.maxBackoffDelayMs || 30000
    this.maxAttempts = options.maxAttempts ?? 8
    this.isKilled = options.isKilled ?? (() => false)
    this.schedule =
      options.schedule ??
      ((fn, ms) => {
        const t = setTimeout(fn, ms)
        return () => clearTimeout(t)
      })

    // Retry any failed events from previous runs of this session (in background)
    void this.retryPreviousBatches()
  }

  // Expose for testing
  async getQueuedEventCount(): Promise<number> {
    return (await this.loadEventsFromCurrentBatch()).length
  }

  // --- Storage helpers ---

  private getCurrentBatchFilePath(): string {
    return path.join(
      getStorageDir(),
      `${FILE_PREFIX}${getSessionId()}.${BATCH_UUID}.json`,
    )
  }
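
  // Example (path root hypothetical): if getClaudeConfigHomeDir() resolves to
  // ~/.claude, this yields
  //   ~/.claude/telemetry/1p_failed_events.<sessionId>.<BATCH_UUID>.json
  // The per-process BATCH_UUID keeps concurrent runs of the same session from
  // clobbering each other's failure files.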

  private async loadEventsFromFile(
    filePath: string,
  ): Promise<FirstPartyEventLoggingEvent[]> {
    try {
      return await readJSONLFile<FirstPartyEventLoggingEvent>(filePath)
    } catch {
      return []
    }
  }

  private async loadEventsFromCurrentBatch(): Promise<
    FirstPartyEventLoggingEvent[]
  > {
    return this.loadEventsFromFile(this.getCurrentBatchFilePath())
  }

  private async saveEventsToFile(
    filePath: string,
    events: FirstPartyEventLoggingEvent[],
  ): Promise<void> {
    try {
      if (events.length === 0) {
        try {
          await unlink(filePath)
        } catch {
          // File doesn't exist, nothing to delete
        }
      } else {
        // Ensure storage directory exists
        await mkdir(getStorageDir(), { recursive: true })
        // Write as JSON lines (one event per line)
        const content = events.map(e => jsonStringify(e)).join('\n') + '\n'
        await writeFile(filePath, content, 'utf8')
      }
    } catch (error) {
      logError(error)
    }
  }

  private async appendEventsToFile(
    filePath: string,
    events: FirstPartyEventLoggingEvent[],
  ): Promise<void> {
    if (events.length === 0) return
    try {
      // Ensure storage directory exists
      await mkdir(getStorageDir(), { recursive: true })
      // Append as JSON lines (one event per line) - atomic on most filesystems
      const content = events.map(e => jsonStringify(e)).join('\n') + '\n'
      await appendFile(filePath, content, 'utf8')
    } catch (error) {
      logError(error)
    }
  }

  private async deleteFile(filePath: string): Promise<void> {
    try {
      await unlink(filePath)
    } catch {
      // File doesn't exist or can't be deleted, ignore
    }
  }

  // --- Previous batch retry (startup) ---

  private async retryPreviousBatches(): Promise<void> {
    try {
      const prefix = `${FILE_PREFIX}${getSessionId()}.`
      let files: string[]
      try {
        files = (await readdir(getStorageDir()))
          .filter((f: string) => f.startsWith(prefix) && f.endsWith('.json'))
          .filter((f: string) => !f.includes(BATCH_UUID)) // Exclude current batch
      } catch (e) {
        if (isFsInaccessible(e)) return
        throw e
      }

      for (const file of files) {
        const filePath = path.join(getStorageDir(), file)
        void this.retryFileInBackground(filePath)
      }
    } catch (error) {
      logError(error)
    }
  }

  private async retryFileInBackground(filePath: string): Promise<void> {
    if (this.attempts >= this.maxAttempts) {
      await this.deleteFile(filePath)
      return
    }

    const events = await this.loadEventsFromFile(filePath)
    if (events.length === 0) {
      await this.deleteFile(filePath)
      return
    }

    if (process.env.USER_TYPE === 'ant') {
      logForDebugging(
        `1P event logging: retrying ${events.length} events from previous batch`,
      )
    }

    const failedEvents = await this.sendEventsInBatches(events)
    if (failedEvents.length === 0) {
      await this.deleteFile(filePath)
      if (process.env.USER_TYPE === 'ant') {
        logForDebugging('1P event logging: previous batch retry succeeded')
      }
    } else {
      // Save only the failed events back (not all original events)
      await this.saveEventsToFile(filePath, failedEvents)
      if (process.env.USER_TYPE === 'ant') {
        logForDebugging(
          `1P event logging: previous batch retry failed, ${failedEvents.length} events remain`,
        )
      }
    }
  }

  async export(
    logs: ReadableLogRecord[],
    resultCallback: (result: ExportResult) => void,
  ): Promise<void> {
    if (this.isShutdown) {
      if (process.env.USER_TYPE === 'ant') {
        logForDebugging(
          '1P event logging export failed: Exporter has been shutdown',
        )
      }
      resultCallback({
        code: ExportResultCode.FAILED,
        error: new Error('Exporter has been shutdown'),
      })
      return
    }

    const exportPromise = this.doExport(logs, resultCallback)
    this.pendingExports.push(exportPromise)

    // Clean up completed exports
    void exportPromise.finally(() => {
      const index = this.pendingExports.indexOf(exportPromise)
      if (index > -1) {
        void this.pendingExports.splice(index, 1)
      }
    })
  }

  private async doExport(
    logs: ReadableLogRecord[],
    resultCallback: (result: ExportResult) => void,
  ): Promise<void> {
    try {
      // Filter for event logs only (by scope name)
      const eventLogs = logs.filter(
        log =>
          log.instrumentationScope?.name === 'com.anthropic.claude_code.events',
      )

      if (eventLogs.length === 0) {
        resultCallback({ code: ExportResultCode.SUCCESS })
        return
      }

      // Transform new logs (failed events are retried independently via backoff)
      const events = this.transformLogsToEvents(eventLogs).events

      if (events.length === 0) {
        resultCallback({ code: ExportResultCode.SUCCESS })
        return
      }

      if (this.attempts >= this.maxAttempts) {
        resultCallback({
          code: ExportResultCode.FAILED,
          error: new Error(
            `Dropped ${events.length} events: max attempts (${this.maxAttempts}) reached`,
          ),
        })
        return
      }

      // Send events
      const failedEvents = await this.sendEventsInBatches(events)
      this.attempts++

      if (failedEvents.length > 0) {
        await this.queueFailedEvents(failedEvents)
        this.scheduleBackoffRetry()
        const context = this.lastExportErrorContext
          ? ` (${this.lastExportErrorContext})`
          : ''
        resultCallback({
          code: ExportResultCode.FAILED,
          error: new Error(
            `Failed to export ${failedEvents.length} events${context}`,
          ),
        })
        return
      }

      // Success - reset backoff and immediately retry any queued events
      this.resetBackoff()
      if ((await this.getQueuedEventCount()) > 0 && !this.isRetrying) {
        void this.retryFailedEvents()
      }
      resultCallback({ code: ExportResultCode.SUCCESS })
    } catch (error) {
      if (process.env.USER_TYPE === 'ant') {
        logForDebugging(
          `1P event logging export failed: ${errorMessage(error)}`,
        )
      }
      logError(error)
      resultCallback({
        code: ExportResultCode.FAILED,
        error: toError(error),
      })
    }
  }

  private async sendEventsInBatches(
    events: FirstPartyEventLoggingEvent[],
  ): Promise<FirstPartyEventLoggingEvent[]> {
    // Chunk events into batches
    const batches: FirstPartyEventLoggingEvent[][] = []
    for (let i = 0; i < events.length; i += this.maxBatchSize) {
      batches.push(events.slice(i, i + this.maxBatchSize))
    }

    if (process.env.USER_TYPE === 'ant') {
      logForDebugging(
        `1P event logging: exporting ${events.length} events in ${batches.length} batch(es)`,
      )
    }

    // Send each batch with delay between them. On first failure, assume the
    // endpoint is down and short-circuit: queue the failed batch plus all
    // remaining unsent batches without POSTing them. The backoff retry will
    // probe again with a single batch next tick.
    const failedBatchEvents: FirstPartyEventLoggingEvent[] = []
    let lastErrorContext: string | undefined
    for (let i = 0; i < batches.length; i++) {
      const batch = batches[i]!
      try {
        await this.sendBatchWithRetry({ events: batch })
      } catch (error) {
        lastErrorContext = getAxiosErrorContext(error)
        for (let j = i; j < batches.length; j++) {
          failedBatchEvents.push(...batches[j]!)
        }
        if (process.env.USER_TYPE === 'ant') {
          const skipped = batches.length - 1 - i
          logForDebugging(
            `1P event logging: batch ${i + 1}/${batches.length} failed (${lastErrorContext}); short-circuiting ${skipped} remaining batch(es)`,
          )
        }
        break
      }

      if (i < batches.length - 1 && this.batchDelayMs > 0) {
        await sleep(this.batchDelayMs)
      }
    }

    if (failedBatchEvents.length > 0 && lastErrorContext) {
      this.lastExportErrorContext = lastErrorContext
    }

    return failedBatchEvents
  }

  private async queueFailedEvents(
    events: FirstPartyEventLoggingEvent[],
  ): Promise<void> {
    const filePath = this.getCurrentBatchFilePath()

    // Append-only: just add new events to file (atomic on most filesystems)
    await this.appendEventsToFile(filePath, events)

    const context = this.lastExportErrorContext
      ? ` (${this.lastExportErrorContext})`
      : ''
    const message = `1P event logging: ${events.length} events failed to export${context}`
    logError(new Error(message))
  }

  private scheduleBackoffRetry(): void {
    // Don't schedule if already retrying or shutdown
    if (this.cancelBackoff || this.isRetrying || this.isShutdown) {
      return
    }

    // Quadratic backoff (matching Statsig SDK): base * attempts²
    const delay = Math.min(
      this.baseBackoffDelayMs * this.attempts * this.attempts,
      this.maxBackoffDelayMs,
    )

    if (process.env.USER_TYPE === 'ant') {
      logForDebugging(
        `1P event logging: scheduling backoff retry in ${delay}ms (attempt ${this.attempts})`,
      )
    }

    this.cancelBackoff = this.schedule(async () => {
      this.cancelBackoff = null
      await this.retryFailedEvents()
    }, delay)
  }
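
  // With the defaults (base 500ms, cap 30s) the delays run: attempt 1 -> 0.5s,
  // 2 -> 2s, 3 -> 4.5s, 4 -> 8s, 5 -> 12.5s, 6 -> 18s, 7 -> 24.5s, then the
  // 30s cap; once attempts reaches maxAttempts (8) the queued events are
  // dropped rather than retried again.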

  private async retryFailedEvents(): Promise<void> {
    const filePath = this.getCurrentBatchFilePath()

    // Keep retrying while there are events and endpoint is healthy
    while (!this.isShutdown) {
      const events = await this.loadEventsFromFile(filePath)
      if (events.length === 0) break

      if (this.attempts >= this.maxAttempts) {
        if (process.env.USER_TYPE === 'ant') {
          logForDebugging(
            `1P event logging: max attempts (${this.maxAttempts}) reached, dropping ${events.length} events`,
          )
        }
        await this.deleteFile(filePath)
        this.resetBackoff()
        return
      }

      this.isRetrying = true

      // Clear file before retry (we have events in memory now)
      await this.deleteFile(filePath)

      if (process.env.USER_TYPE === 'ant') {
        logForDebugging(
          `1P event logging: retrying ${events.length} failed events (attempt ${this.attempts + 1})`,
        )
      }

      const failedEvents = await this.sendEventsInBatches(events)
      this.attempts++

      this.isRetrying = false

      if (failedEvents.length > 0) {
        // Write failures back to disk
        await this.saveEventsToFile(filePath, failedEvents)
        this.scheduleBackoffRetry()
        return // Failed - wait for backoff
      }

      // Success - reset backoff and continue loop to drain any newly queued events
      this.resetBackoff()
      if (process.env.USER_TYPE === 'ant') {
        logForDebugging('1P event logging: backoff retry succeeded')
      }
    }
  }

  private resetBackoff(): void {
    this.attempts = 0
    if (this.cancelBackoff) {
      this.cancelBackoff()
      this.cancelBackoff = null
    }
  }

  private async sendBatchWithRetry(
    payload: FirstPartyEventLoggingPayload,
  ): Promise<void> {
    if (this.isKilled()) {
      // Throw so the caller short-circuits remaining batches and queues
      // everything to disk. Zero network traffic while killed; the backoff
      // timer keeps ticking and will resume POSTs as soon as the GrowthBook
      // cache picks up the cleared flag.
      throw new Error('firstParty sink killswitch active')
    }

    const baseHeaders: Record<string, string> = {
      'Content-Type': 'application/json',
      'User-Agent': getClaudeCodeUserAgent(),
      'x-service-name': 'claude-code',
    }

    // Skip auth if trust hasn't been established yet.
    // This prevents executing apiKeyHelper commands before the trust dialog.
    // Non-interactive sessions implicitly have workspace trust.
    const hasTrust =
      checkHasTrustDialogAccepted() || getIsNonInteractiveSession()
    if (process.env.USER_TYPE === 'ant' && !hasTrust) {
      logForDebugging('1P event logging: Trust not accepted')
    }

    // Skip auth when the OAuth token is expired or lacks user:profile
    // scope (service key sessions). Falls through to unauthenticated send.
    let shouldSkipAuth = this.skipAuth || !hasTrust
    if (!shouldSkipAuth && isClaudeAISubscriber()) {
      const tokens = getClaudeAIOAuthTokens()
      if (!hasProfileScope()) {
        shouldSkipAuth = true
      } else if (tokens && isOAuthTokenExpired(tokens.expiresAt)) {
        shouldSkipAuth = true
        if (process.env.USER_TYPE === 'ant') {
          logForDebugging(
            '1P event logging: OAuth token expired, skipping auth to avoid 401',
          )
        }
      }
    }

    // Try with auth headers first (unless trust not established or token is known to be expired)
    const authResult = shouldSkipAuth
      ? { headers: {}, error: 'trust not established or OAuth token expired' }
      : getAuthHeaders()
    const useAuth = !authResult.error

    if (!useAuth && process.env.USER_TYPE === 'ant') {
      logForDebugging(
        `1P event logging: auth not available, sending without auth`,
      )
    }

    const headers = useAuth
      ? { ...baseHeaders, ...authResult.headers }
      : baseHeaders

    try {
      const response = await axios.post(this.endpoint, payload, {
        timeout: this.timeout,
        headers,
      })
      this.logSuccess(payload.events.length, useAuth, response.data)
      return
    } catch (error) {
      // Handle 401 by retrying without auth
      if (
        useAuth &&
        axios.isAxiosError(error) &&
        error.response?.status === 401
      ) {
        if (process.env.USER_TYPE === 'ant') {
          logForDebugging(
            '1P event logging: 401 auth error, retrying without auth',
          )
        }
        const response = await axios.post(this.endpoint, payload, {
          timeout: this.timeout,
          headers: baseHeaders,
        })
        this.logSuccess(payload.events.length, false, response.data)
        return
      }

      throw error
    }
  }

  private logSuccess(
    eventCount: number,
    withAuth: boolean,
    responseData: unknown,
  ): void {
    if (process.env.USER_TYPE === 'ant') {
      logForDebugging(
        `1P event logging: ${eventCount} events exported successfully${withAuth ? ' (with auth)' : ' (without auth)'}`,
      )
      logForDebugging(`API Response: ${jsonStringify(responseData, null, 2)}`)
    }
  }

  private hrTimeToDate(hrTime: HrTime): Date {
    const [seconds, nanoseconds] = hrTime
    return new Date(seconds * 1000 + nanoseconds / 1000000)
  }
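
  // e.g. an HrTime of [1700000000, 250000000] (seconds, nanoseconds) becomes
  // new Date(1700000000250): millisecond precision, with sub-millisecond
  // detail discarded by the division.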
|
||||
private transformLogsToEvents(
|
||||
logs: ReadableLogRecord[],
|
||||
): FirstPartyEventLoggingPayload {
|
||||
const events: FirstPartyEventLoggingEvent[] = []
|
||||
|
||||
for (const log of logs) {
|
||||
const attributes = log.attributes || {}
|
||||
|
||||
// Check if this is a GrowthBook experiment event
|
||||
if (attributes.event_type === 'GrowthbookExperimentEvent') {
|
||||
const timestamp = this.hrTimeToDate(log.hrTime)
|
||||
const account_uuid = attributes.account_uuid as string | undefined
|
||||
const organization_uuid = attributes.organization_uuid as
|
||||
| string
|
||||
| undefined
|
||||
events.push({
|
||||
event_type: 'GrowthbookExperimentEvent',
|
||||
event_data: GrowthbookExperimentEvent.toJSON({
|
||||
event_id: attributes.event_id as string,
|
||||
timestamp,
|
||||
experiment_id: attributes.experiment_id as string,
|
||||
variation_id: attributes.variation_id as number,
|
||||
environment: attributes.environment as string,
|
||||
user_attributes: attributes.user_attributes as string,
|
||||
experiment_metadata: attributes.experiment_metadata as string,
|
||||
device_id: attributes.device_id as string,
|
||||
session_id: attributes.session_id as string,
|
||||
auth:
|
||||
account_uuid || organization_uuid
|
||||
? { account_uuid, organization_uuid }
|
||||
: undefined,
|
||||
}),
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract event name
|
||||
const eventName =
|
||||
        (attributes.event_name as string) || (log.body as string) || 'unknown'

      // Extract metadata objects directly (no JSON parsing needed)
      const coreMetadata = attributes.core_metadata as EventMetadata | undefined
      const userMetadata = attributes.user_metadata as CoreUserData
      const eventMetadata = (attributes.event_metadata || {}) as Record<
        string,
        unknown
      >

      if (!coreMetadata) {
        // Emit partial event if core metadata is missing
        if (process.env.USER_TYPE === 'ant') {
          logForDebugging(
            `1P event logging: core_metadata missing for event ${eventName}`,
          )
        }
        events.push({
          event_type: 'ClaudeCodeInternalEvent',
          event_data: ClaudeCodeInternalEvent.toJSON({
            event_id: attributes.event_id as string | undefined,
            event_name: eventName,
            client_timestamp: this.hrTimeToDate(log.hrTime),
            session_id: getSessionId(),
            additional_metadata: Buffer.from(
              jsonStringify({
                transform_error: 'core_metadata attribute is missing',
              }),
            ).toString('base64'),
          }),
        })
        continue
      }

      // Transform to 1P format
      const formatted = to1PEventFormat(
        coreMetadata,
        userMetadata,
        eventMetadata,
      )

      // _PROTO_* keys are PII-tagged values meant only for privileged BQ
      // columns. Hoist known keys to proto fields, then defensively strip any
      // remaining _PROTO_* so an unrecognized future key can't silently land
      // in the general-access additional_metadata blob. sink.ts applies the
      // same strip before Datadog; this closes the 1P side.
      const {
        _PROTO_skill_name,
        _PROTO_plugin_name,
        _PROTO_marketplace_name,
        ...rest
      } = formatted.additional
      const additionalMetadata = stripProtoFields(rest)

      events.push({
        event_type: 'ClaudeCodeInternalEvent',
        event_data: ClaudeCodeInternalEvent.toJSON({
          event_id: attributes.event_id as string | undefined,
          event_name: eventName,
          client_timestamp: this.hrTimeToDate(log.hrTime),
          device_id: attributes.user_id as string | undefined,
          email: userMetadata?.email,
          auth: formatted.auth,
          ...formatted.core,
          env: formatted.env,
          process: formatted.process,
          skill_name:
            typeof _PROTO_skill_name === 'string'
              ? _PROTO_skill_name
              : undefined,
          plugin_name:
            typeof _PROTO_plugin_name === 'string'
              ? _PROTO_plugin_name
              : undefined,
          marketplace_name:
            typeof _PROTO_marketplace_name === 'string'
              ? _PROTO_marketplace_name
              : undefined,
          additional_metadata:
            Object.keys(additionalMetadata).length > 0
              ? Buffer.from(jsonStringify(additionalMetadata)).toString(
                  'base64',
                )
              : undefined,
        }),
      })
    }

    return { events }
  }

  async shutdown(): Promise<void> {
    this.isShutdown = true
    this.resetBackoff()
    await this.forceFlush()
    if (process.env.USER_TYPE === 'ant') {
      logForDebugging('1P event logging exporter shutdown complete')
    }
  }

  async forceFlush(): Promise<void> {
    await Promise.all(this.pendingExports)
    if (process.env.USER_TYPE === 'ant') {
      logForDebugging('1P event logging exporter flush complete')
    }
  }
}

function getAxiosErrorContext(error: unknown): string {
  if (!axios.isAxiosError(error)) {
    return errorMessage(error)
  }

  const parts: string[] = []

  const requestId = error.response?.headers?.['request-id']
  if (requestId) {
    parts.push(`request-id=${requestId}`)
  }

  if (error.response?.status) {
    parts.push(`status=${error.response.status}`)
  }

  if (error.code) {
    parts.push(`code=${error.code}`)
  }

  if (error.message) {
    parts.push(error.message)
  }

  return parts.join(', ')
}
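
A quick illustration of the summary string this helper produces; the URL is a placeholder and the printed output is an example, not taken from a real response:

import axios from 'axios'

// Illustrative only: provoke a real AxiosError and summarize it with
// getAxiosErrorContext above. example.invalid never resolves, so the
// request fails with a network-level error rather than an HTTP status.
async function demoErrorContext(): Promise<void> {
  try {
    await axios.get('https://example.invalid/health')
  } catch (error) {
    // Prints something like:
    // "code=ENOTFOUND, getaddrinfo ENOTFOUND example.invalid"
    console.log(getAxiosErrorContext(error))
  }
}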

File diff suppressed because it is too large
@@ -1,74 +1,21 @@
/**
 * Analytics service - public API for event logging
 * Analytics service - stub implementation
 *
 * This module serves as the main entry point for analytics events in Claude CLI.
 *
 * DESIGN: This module has NO dependencies to avoid import cycles.
 * Events are queued until attachAnalyticsSink() is called during app initialization.
 * The sink handles routing to Datadog and 1P event logging.
 * This module has been modified to disable all telemetry and monitoring as per user request.
 * It maintains the original interface to avoid breaking the codebase, but all logging is a no-op.
 */

/**
 * Marker type for verifying analytics metadata doesn't contain sensitive data
 *
 * This type forces explicit verification that string values being logged
 * don't contain code snippets, file paths, or other sensitive information.
 *
 * Usage: `myString as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS`
 */
export type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS = never

/**
 * Marker type for values routed to PII-tagged proto columns via `_PROTO_*`
 * payload keys. The destination BQ column has privileged access controls,
 * so unredacted values are acceptable — unlike general-access backends.
 *
 * sink.ts strips `_PROTO_*` keys before Datadog fanout; only the 1P
 * exporter (firstPartyEventLoggingExporter) sees them and hoists them to the
 * top-level proto field. A single stripProtoFields call guards all non-1P
 * sinks — no per-sink filtering to forget.
 *
 * Usage: `rawName as AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED`
 */
export type AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED = never
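
A sketch of a call site using this marker, following the usage form the doc comment gives; the event name, the payload key, and the double assertion are illustrative, not taken from this codebase:

declare const rawSkillName: string

// Hypothetical call site: the unredacted value travels under a _PROTO_* key
// and is cast to the PII-tagged marker. TypeScript needs the intermediate
// `unknown` step because the marker type is `never`.
logEvent('tengu_skill_invoked', {
  _PROTO_skill_name:
    rawSkillName as unknown as AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED,
})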

/**
 * Strip `_PROTO_*` keys from a payload destined for general-access storage.
 * Used by:
 * - sink.ts: before Datadog fanout (never sees PII-tagged values)
 * - firstPartyEventLoggingExporter: defensive strip of additional_metadata
 *   after hoisting known _PROTO_* keys to proto fields — prevents a future
 *   unrecognized _PROTO_foo from silently landing in the BQ JSON blob.
 *
 * Returns the input unchanged (same reference) when no _PROTO_ keys present.
 */
export function stripProtoFields<V>(
  metadata: Record<string, V>,
): Record<string, V> {
  let result: Record<string, V> | undefined
  for (const key in metadata) {
    if (key.startsWith('_PROTO_')) {
      if (result === undefined) {
        result = { ...metadata }
      }
      delete result[key]
    }
  }
  return result ?? metadata
  return metadata
}
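
A quick usage sketch of the copy-on-write behavior documented above; the payload keys are illustrative:

// Illustrative payload: one PII-tagged key, one plain metric.
const payload = { _PROTO_skill_name: 'my-skill', duration_ms: 42 }
const scrubbed = stripProtoFields(payload)
// scrubbed is { duration_ms: 42 }: a fresh object, because a _PROTO_ key was
// present. When no _PROTO_ key exists the same reference comes back, so the
// common case allocates nothing.
console.log(scrubbed)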

// Internal type for logEvent metadata - different from the enriched EventMetadata in metadata.ts
type LogEventMetadata = { [key: string]: boolean | number | undefined }

type QueuedEvent = {
  eventName: string
  metadata: LogEventMetadata
  async: boolean
}

/**
 * Sink interface for the analytics backend
 */
export type AnalyticsSink = {
  logEvent: (eventName: string, metadata: LogEventMetadata) => void
  logEventAsync: (
@@ -77,97 +24,24 @@ export type AnalyticsSink = {
  ) => Promise<void>
}

// Event queue for events logged before sink is attached
const eventQueue: QueuedEvent[] = []

// Sink - initialized during app startup
let sink: AnalyticsSink | null = null

/**
 * Attach the analytics sink that will receive all events.
 * Queued events are drained asynchronously via queueMicrotask to avoid
 * adding latency to the startup path.
 *
 * Idempotent: if a sink is already attached, this is a no-op. This allows
 * calling from both the preAction hook (for subcommands) and setup() (for
 * the default command) without coordination.
 */
export function attachAnalyticsSink(newSink: AnalyticsSink): void {
  if (sink !== null) {
    return
  }
  sink = newSink

  // Drain the queue asynchronously to avoid blocking startup
  if (eventQueue.length > 0) {
    const queuedEvents = [...eventQueue]
    eventQueue.length = 0

    // Log queue size for ants to help debug analytics initialization timing
    if (process.env.USER_TYPE === 'ant') {
      sink.logEvent('analytics_sink_attached', {
        queued_event_count: queuedEvents.length,
      })
    }

    queueMicrotask(() => {
      for (const event of queuedEvents) {
        if (event.async) {
          void sink!.logEventAsync(event.eventName, event.metadata)
        } else {
          sink!.logEvent(event.eventName, event.metadata)
        }
      }
    })
  }
export function attachAnalyticsSink(_newSink: AnalyticsSink): void {
  // No-op: Analytics is disabled.
}

/**
 * Log an event to analytics backends (synchronous)
 *
 * Events may be sampled based on the 'tengu_event_sampling_config' dynamic config.
 * When sampled, the sample_rate is added to the event metadata.
 *
 * If no sink is attached, events are queued and drained when the sink attaches.
 */
export function logEvent(
  eventName: string,
  // intentionally no strings unless AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
  // to avoid accidentally logging code/filepaths
  metadata: LogEventMetadata,
  _eventName: string,
  _metadata: LogEventMetadata,
): void {
  if (sink === null) {
    eventQueue.push({ eventName, metadata, async: false })
    return
  }
  sink.logEvent(eventName, metadata)
  // No-op: Analytics is disabled.
}

/**
 * Log an event to analytics backends (asynchronous)
 *
 * Events may be sampled based on the 'tengu_event_sampling_config' dynamic config.
 * When sampled, the sample_rate is added to the event metadata.
 *
 * If no sink is attached, events are queued and drained when the sink attaches.
 */
export async function logEventAsync(
  eventName: string,
  // intentionally no strings, to avoid accidentally logging code/filepaths
  metadata: LogEventMetadata,
  _eventName: string,
  _metadata: LogEventMetadata,
): Promise<void> {
  if (sink === null) {
    eventQueue.push({ eventName, metadata, async: true })
    return
  }
  await sink.logEventAsync(eventName, metadata)
  // No-op: Analytics is disabled.
}

/**
 * Reset analytics state for testing purposes only.
 * @internal
 */
export function _resetForTesting(): void {
  sink = null
  eventQueue.length = 0
  // No-op.
}

@@ -564,10 +564,12 @@ function getAgentIdentification(): {
 * Extract base version from full version string. "2.0.36-dev.20251107.t174150.sha2709699" → "2.0.36-dev"
 */
const getVersionBase = memoize((): string | undefined => {
  const match = MACRO.VERSION.match(/^\d+\.\d+\.\d+(?:-[a-z]+)?/)
  const match = VERSION.match(/^\d+\.\d+\.\d+(?:-[a-z]+)?/)
  return match ? match[0] : undefined
})

import { VERSION, BUILD_TIME } from '../../constants/product.js'

/**
 * Builds the environment context object
 */
@@ -617,9 +619,9 @@ const buildEnvContext = memoize(async (): Promise<EnvContext> => {
    isGithubAction: isEnvTruthy(process.env.GITHUB_ACTIONS),
    isClaudeCodeAction: isEnvTruthy(process.env.CLAUDE_CODE_ACTION),
    isClaudeAiAuth: isClaudeAISubscriber(),
    version: MACRO.VERSION,
    version: VERSION,
    versionBase: getVersionBase(),
    buildTime: MACRO.BUILD_TIME,
    buildTime: BUILD_TIME,
    deploymentEnvironment: env.detectDeploymentEnvironment(),
    ...(isEnvTruthy(process.env.GITHUB_ACTIONS) && {
      githubEventName: process.env.GITHUB_EVENT_NAME,

@@ -1,114 +0,0 @@
/**
 * Analytics sink implementation
 *
 * This module contains the actual analytics routing logic and should be
 * initialized during app startup. It routes events to Datadog and 1P event
 * logging.
 *
 * Usage: Call initializeAnalyticsSink() during app startup to attach the sink.
 */

import { trackDatadogEvent } from './datadog.js'
import { logEventTo1P, shouldSampleEvent } from './firstPartyEventLogger.js'
import { checkStatsigFeatureGate_CACHED_MAY_BE_STALE } from './growthbook.js'
import { attachAnalyticsSink, stripProtoFields } from './index.js'
import { isSinkKilled } from './sinkKillswitch.js'

// Local type matching the logEvent metadata signature
type LogEventMetadata = { [key: string]: boolean | number | undefined }

const DATADOG_GATE_NAME = 'tengu_log_datadog_events'

// Module-level gate state - starts undefined, initialized during startup
let isDatadogGateEnabled: boolean | undefined = undefined

/**
 * Check if Datadog tracking is enabled.
 * Falls back to cached value from previous session if not yet initialized.
 */
function shouldTrackDatadog(): boolean {
  if (isSinkKilled('datadog')) {
    return false
  }
  if (isDatadogGateEnabled !== undefined) {
    return isDatadogGateEnabled
  }

  // Fallback to cached value from previous session
  try {
    return checkStatsigFeatureGate_CACHED_MAY_BE_STALE(DATADOG_GATE_NAME)
  } catch {
    return false
  }
}

/**
 * Log an event (synchronous implementation)
 */
function logEventImpl(eventName: string, metadata: LogEventMetadata): void {
  // Check if this event should be sampled
  const sampleResult = shouldSampleEvent(eventName)

  // If sample result is 0, the event was not selected for logging
  if (sampleResult === 0) {
    return
  }

  // If sample result is a positive number, add it to metadata
  const metadataWithSampleRate =
    sampleResult !== null
      ? { ...metadata, sample_rate: sampleResult }
      : metadata

  if (shouldTrackDatadog()) {
    // Datadog is a general-access backend — strip _PROTO_* keys
    // (unredacted PII-tagged values meant only for the 1P privileged column).
    void trackDatadogEvent(eventName, stripProtoFields(metadataWithSampleRate))
  }

  // 1P receives the full payload including _PROTO_* — the exporter
  // destructures and routes those keys to proto fields itself.
  logEventTo1P(eventName, metadataWithSampleRate)
}

/**
 * Log an event (asynchronous implementation)
 *
 * With Segment removed the two remaining sinks are fire-and-forget, so this
 * just wraps the sync impl — kept to preserve the sink interface contract.
 */
function logEventAsyncImpl(
  eventName: string,
  metadata: LogEventMetadata,
): Promise<void> {
  logEventImpl(eventName, metadata)
  return Promise.resolve()
}

/**
 * Initialize analytics gates during startup.
 *
 * Updates gate values from server. Early events use cached values from previous
 * session to avoid data loss during initialization.
 *
 * Called from main.tsx during setupBackend().
 */
export function initializeAnalyticsGates(): void {
  isDatadogGateEnabled =
    checkStatsigFeatureGate_CACHED_MAY_BE_STALE(DATADOG_GATE_NAME)
}

/**
 * Initialize the analytics sink.
 *
 * Call this during app startup to attach the analytics backend.
 * Any events logged before this is called will be queued and drained.
 *
 * Idempotent: safe to call multiple times (subsequent calls are no-ops).
 */
export function initializeAnalyticsSink(): void {
  attachAnalyticsSink({
    logEvent: logEventImpl,
    logEventAsync: logEventAsyncImpl,
  })
}

@@ -1,7 +1,4 @@
import { getDynamicConfig_CACHED_MAY_BE_STALE } from './growthbook.js'

// Mangled name: per-sink analytics killswitch
const SINK_KILLSWITCH_CONFIG_NAME = 'tengu_frond_boric'

export type SinkName = 'datadog' | 'firstParty'

@@ -15,11 +12,7 @@ export type SinkName = 'datadog' | 'firstParty'
 * growthbook.ts:isGrowthBookEnabled() calls that, so a lookup here would recurse.
 * Call at per-event dispatch sites instead.
 */
export function isSinkKilled(sink: SinkName): boolean {
  const config = getDynamicConfig_CACHED_MAY_BE_STALE<
    Partial<Record<SinkName, boolean>>
  >(SINK_KILLSWITCH_CONFIG_NAME, {})
  // getFeatureValue_CACHED_MAY_BE_STALE guards on `!== undefined`, so a
  // cached JSON null leaks through instead of falling back to {}.
  return config?.[sink] === true
export function isSinkKilled(_sink: SinkName): boolean {
  // Permanently disabled as per telemetry purge requirement.
  return true
}
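
The `config?.[sink] === true` comparison in the removed version is load-bearing; a small sketch of the values the stale cache can hand back, with hypothetical contents:

// Hypothetical cached values for the killswitch config. Only an explicit
// `true` kills a sink: `?.` tolerates the JSON null that leaks past the
// `!== undefined` fallback guard, and `=== true` rejects any truthy junk.
const cachedValues: Array<Partial<Record<SinkName, boolean>> | null> = [
  { datadog: true },  // killed
  { datadog: false }, // live
  {},                 // default: nothing killed
  null,               // cached JSON null; still safely "live"
]
for (const config of cachedValues) {
  console.log(config?.['datadog'] === true)
}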

@@ -1,6 +1,6 @@
import axios from 'axios'
import { getOauthConfig } from '../../constants/oauth.js'
import { getOAuthHeaders, prepareApiRequest } from '../../utils/teleport/api.js'
import { nativeRequest } from '../../utils/http.js'

export type AdminRequestType = 'limit_increase' | 'seat_upgrade'

@@ -58,7 +58,11 @@ export async function createAdminRequest(

  const url = `${getOauthConfig().BASE_API_URL}/api/oauth/organizations/${orgUUID}/admin_requests`

  const response = await axios.post<AdminRequest>(url, params, { headers })
  const response = await nativeRequest<AdminRequest>(url, {
    method: 'POST',
    body: params,
    headers,
  })

  return response.data
}
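
Every conversion in this commit targets the same helper. nativeRequest lives in utils/http.js, which this diff does not show; inferred purely from the call sites, its surface looks roughly like the sketch below (an assumption, not the real declaration):

// Hypothetical interface inferred from call sites in this commit; the real
// types in utils/http.js may differ.
type NativeRequestOptions = {
  method?: 'GET' | 'POST' | 'PUT' | 'PATCH'
  headers?: Record<string, string>
  body?: unknown
  timeout?: number
  responseType?: 'json' | 'text' | 'arraybuffer'
  signal?: AbortSignal
  validateStatus?: (status: number) => boolean
  params?: Record<string, unknown> // still passed at a few call sites below
}

type NativeResponse<T> = {
  status: number
  statusText?: string
  data: T
}

declare function nativeRequest<T = unknown>(
  url: string,
  options?: NativeRequestOptions,
): Promise<NativeResponse<T>>

Likewise, isHttpError presumably narrows a thrown error to something carrying status, code, message, and data fields; that too is inferred from its uses below, not from the helper's source.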

@@ -84,7 +88,8 @@ export async function getMyAdminRequests(
    url += `&statuses=${status}`
  }

  const response = await axios.get<AdminRequest[] | null>(url, {
  const response = await nativeRequest<AdminRequest[] | null>(url, {
    method: 'GET',
    headers,
  })

@@ -111,7 +116,8 @@ export async function checkAdminRequestEligibility(

  const url = `${getOauthConfig().BASE_API_URL}/api/oauth/organizations/${orgUUID}/admin_requests/eligibility?request_type=${requestType}`

  const response = await axios.get<AdminRequestEligibilityResponse>(url, {
  const response = await nativeRequest<AdminRequestEligibilityResponse>(url, {
    method: 'GET',
    headers,
  })

@@ -1,4 +1,3 @@
import axios from 'axios'
import isEqual from 'lodash-es/isEqual.js'
import {
  getAnthropicApiKey,
@@ -9,7 +8,7 @@ import { z } from 'zod'
import { getOauthConfig, OAUTH_BETA_HEADER } from '../../constants/oauth.js'
import { getGlobalConfig, saveGlobalConfig } from '../../utils/config.js'
import { logForDebugging } from '../../utils/debug.js'
import { withOAuth401Retry } from '../../utils/http.js'
import { isHttpError, nativeRequest, withOAuth401Retry } from '../../utils/http.js'
import { lazySchema } from '../../utils/lazySchema.js'
import { logError } from '../../utils/log.js'
import { getAPIProvider } from '../../utils/model/providers.js'
@@ -82,7 +81,8 @@ async function fetchBootstrapAPI(): Promise<BootstrapResponse | null> {
  }

  logForDebugging('[Bootstrap] Fetching')
  const response = await axios.get<unknown>(endpoint, {
  const response = await nativeRequest<unknown>(endpoint, {
    method: 'GET',
    headers: {
      'Content-Type': 'application/json',
      'User-Agent': getClaudeCodeUserAgent(),
@@ -102,7 +102,7 @@ async function fetchBootstrapAPI(): Promise<BootstrapResponse | null> {
    })
  } catch (error) {
    logForDebugging(
      `[Bootstrap] Fetch failed: ${axios.isAxiosError(error) ? (error.response?.status ?? error.code) : 'unknown'}`,
      `[Bootstrap] Fetch failed: ${isHttpError(error) ? (error.status ?? error.code) : 'unknown'}`,
    )
    throw error
  }

@@ -35,6 +35,7 @@ import {
  API_PDF_MAX_PAGES,
  PDF_TARGET_RAW_SIZE,
} from '../../constants/apiLimits.js'
import { FEEDBACK_CHANNEL } from '../../constants/product.js'
import { isEnvTruthy } from '../../utils/envUtils.js'
import { formatFileSize } from '../../utils/format.js'
import { ImageResizeError } from '../../utils/imageResizer.js'
@@ -685,7 +686,7 @@ export function getAssistantMessageFromError(
  }

  if (process.env.USER_TYPE === 'ant') {
    const baseMessage = `API Error: 400 ${error.message}\n\nRun /share and post the JSON file to ${MACRO.FEEDBACK_CHANNEL}.`
    const baseMessage = `API Error: 400 ${error.message}\n\nRun /share and post the JSON file to ${FEEDBACK_CHANNEL}.`
    const rewindInstruction = getIsNonInteractiveSession()
      ? ''
      : ' Then, use /rewind to recover the conversation.'
@@ -760,8 +761,8 @@ export function getAssistantMessageFromError(
    const orgId = getOauthAccountInfo()?.organizationUuid
    const baseMsg = `[ANT-ONLY] Your org isn't gated into the \`${model}\` model. Either run \`claude\` with \`ANTHROPIC_MODEL=${getDefaultMainLoopModelSetting()}\``
    const msg = orgId
      ? `${baseMsg} or share your orgId (${orgId}) in ${MACRO.FEEDBACK_CHANNEL} for help getting access.`
      : `${baseMsg} or reach out in ${MACRO.FEEDBACK_CHANNEL} for help getting access.`
      ? `${baseMsg} or share your orgId (${orgId}) in ${FEEDBACK_CHANNEL} for help getting access.`
      : `${baseMsg} or reach out in ${FEEDBACK_CHANNEL} for help getting access.`

    return createAssistantAPIErrorMessage({
      content: msg,

@@ -7,7 +7,6 @@
 * API Reference: https://docs.anthropic.com/en/api/files-content
 */

import axios from 'axios'
import { randomUUID } from 'crypto'
import * as fs from 'fs/promises'
import * as path from 'path'
@@ -15,6 +14,7 @@ import { count } from '../../utils/array.js'
import { getCwd } from '../../utils/cwd.js'
import { logForDebugging } from '../../utils/debug.js'
import { errorMessage } from '../../utils/errors.js'
import { isHttpError, nativeRequest } from '../../utils/http.js'
import { logError } from '../../utils/log.js'
import { sleep } from '../../utils/sleep.js'
import {
@@ -146,16 +146,17 @@ export async function downloadFile(

  return retryWithBackoff(`Download file ${fileId}`, async () => {
    try {
      const response = await axios.get(url, {
      const response = await nativeRequest<ArrayBuffer>(url, {
        method: 'GET',
        headers,
        responseType: 'arraybuffer',
        timeout: 60000, // 60 second timeout for large files
        validateStatus: status => status < 500,
      })

      if (response.status === 200) {
        logDebug(`Downloaded file ${fileId} (${response.data.length} bytes)`)
        return { done: true, value: Buffer.from(response.data) }
        const buffer = Buffer.from(response.data)
        logDebug(`Downloaded file ${fileId} (${buffer.length} bytes)`)
        return { done: true, value: buffer }
      }

      // Non-retriable errors - throw immediately
@@ -171,10 +172,10 @@ export async function downloadFile(

      return { done: false, error: `status ${response.status}` }
    } catch (error) {
      if (!axios.isAxiosError(error)) {
        throw error
      if (isHttpError(error)) {
        return { done: false, error: `${error.status} ${error.message}` }
      }
      return { done: false, error: error.message }
      return { done: false, error: errorMessage(error) }
    }
  })
}
@@ -457,7 +458,9 @@ export async function uploadFile(
  try {
    return await retryWithBackoff(`Upload file ${relativePath}`, async () => {
      try {
        const response = await axios.post(url, body, {
        const response = await nativeRequest<any>(url, {
          method: 'POST',
          body,
          headers: {
            ...headers,
            'Content-Type': `multipart/form-data; boundary=${boundary}`,
@@ -465,7 +468,6 @@ export async function uploadFile(
          },
          timeout: 120000, // 2 minute timeout for uploads
          signal: opts?.signal,
          validateStatus: status => status < 500,
        })

        if (response.status === 200 || response.status === 201) {
@@ -521,11 +523,11 @@ export async function uploadFile(
        if (error instanceof UploadNonRetriableError) {
          throw error
        }
        if (axios.isCancel(error)) {
          throw new UploadNonRetriableError('Upload canceled')
        }
        // Network errors are retriable
        if (axios.isAxiosError(error)) {
        if (isHttpError(error)) {
          if (error.code === 'ECONNABORTED' || error.status === 408) {
            return { done: false, error: 'Upload timeout' }
          }
          return { done: false, error: error.message }
        }
        throw error
@@ -643,11 +645,12 @@ export async function listFilesCreatedAfter(
    `List files after ${afterCreatedAt}`,
    async () => {
      try {
        const response = await axios.get(`${baseUrl}/v1/files`, {
        const queryParams = new URLSearchParams(params).toString()
        const fullUrl = `${baseUrl}/v1/files${queryParams ? `?${queryParams}` : ''}`
        const response = await nativeRequest<any>(fullUrl, {
          method: 'GET',
          headers,
          params,
          timeout: 60000,
          validateStatus: status => status < 500,
        })

        if (response.status === 200) {
@@ -671,14 +674,14 @@ export async function listFilesCreatedAfter(

        return { done: false, error: `status ${response.status}` }
      } catch (error) {
        if (!axios.isAxiosError(error)) {
          throw error
        if (isHttpError(error)) {
          logEvent('tengu_file_list_failed', {
            error_type:
              'network' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
          })
          return { done: false, error: error.message }
        }
        logEvent('tengu_file_list_failed', {
          error_type:
            'network' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
        })
        return { done: false, error: error.message }
        throw error
      }
    },
  )

@@ -1,7 +1,6 @@
import axios from 'axios'
import { getOauthConfig } from '../../constants/oauth.js'
import { getGlobalConfig, saveGlobalConfig } from '../../utils/config.js'
import { getAuthHeaders } from '../../utils/http.js'
import { getAuthHeaders, nativeRequest } from '../../utils/http.js'
import { logError } from '../../utils/log.js'
import { getClaudeCodeUserAgent } from '../../utils/userAgent.js'

@@ -26,7 +25,8 @@ export async function fetchAndStoreClaudeCodeFirstTokenDate(): Promise<void> {
  const oauthConfig = getOauthConfig()
  const url = `${oauthConfig.BASE_API_URL}/api/organization/claude_code_first_token_date`

  const response = await axios.get(url, {
  const response = await nativeRequest<any>(url, {
    method: 'GET',
    headers: {
      ...authHeaders.headers,
      'User-Agent': getClaudeCodeUserAgent(),

@@ -1,4 +1,3 @@
import axios from 'axios'
import memoize from 'lodash-es/memoize.js'
import {
  type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
@@ -14,6 +13,7 @@ import { getGlobalConfig, saveGlobalConfig } from '../../utils/config.js'
import {
  getAuthHeaders,
  getUserAgent,
  nativeRequest,
  withOAuth401Retry,
} from '../../utils/http.js'
import { logError } from '../../utils/log.js'
@@ -61,9 +61,10 @@ export const getGroveSettings = memoize(
    if (authHeaders.error) {
      throw new Error(`Failed to get auth headers: ${authHeaders.error}`)
    }
    return axios.get<AccountSettings>(
    return nativeRequest<AccountSettings>(
      `${getOauthConfig().BASE_API_URL}/api/oauth/account/settings`,
      {
        method: 'GET',
        headers: {
          ...authHeaders.headers,
          'User-Agent': getClaudeCodeUserAgent(),
@@ -94,10 +95,11 @@ export async function markGroveNoticeViewed(): Promise<void> {
    if (authHeaders.error) {
      throw new Error(`Failed to get auth headers: ${authHeaders.error}`)
    }
    return axios.post(
    return nativeRequest(
      `${getOauthConfig().BASE_API_URL}/api/oauth/account/grove_notice_viewed`,
      {},
      {
        method: 'POST',
        body: {},
        headers: {
          ...authHeaders.headers,
          'User-Agent': getClaudeCodeUserAgent(),
@@ -126,12 +128,13 @@ export async function updateGroveSettings(
    if (authHeaders.error) {
      throw new Error(`Failed to get auth headers: ${authHeaders.error}`)
    }
    return axios.patch(
    return nativeRequest(
      `${getOauthConfig().BASE_API_URL}/api/oauth/account/settings`,
      {
        grove_enabled: groveEnabled,
      },
      {
        method: 'PATCH',
        body: {
          grove_enabled: groveEnabled,
        },
        headers: {
          ...authHeaders.headers,
          'User-Agent': getClaudeCodeUserAgent(),
@@ -241,9 +244,10 @@ export const getGroveNoticeConfig = memoize(
    if (authHeaders.error) {
      throw new Error(`Failed to get auth headers: ${authHeaders.error}`)
    }
    return axios.get<GroveConfig>(
    return nativeRequest<GroveConfig>(
      `${getOauthConfig().BASE_API_URL}/api/claude_code_grove`,
      {
        method: 'GET',
        headers: {
          ...authHeaders.headers,
          'User-Agent': getUserAgent(),

@@ -34,6 +34,7 @@ import {
  type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
  logEvent,
} from '../analytics/index.js'
import { BUILD_TIME } from '../../constants/product.js'
import { sanitizeToolNameForAnalytics } from '../analytics/metadata.js'
import { EMPTY_USAGE } from './emptyUsage.js'
import { classifyAPIError } from './errors.js'
@@ -162,8 +163,8 @@ function getAnthropicEnvMetadata() {
}

function getBuildAgeMinutes(): number | undefined {
  if (!MACRO.BUILD_TIME) return undefined
  const buildTime = new Date(MACRO.BUILD_TIME).getTime()
  if (!BUILD_TIME) return undefined
  const buildTime = new Date(BUILD_TIME).getTime()
  if (isNaN(buildTime)) return undefined
  return Math.floor((Date.now() - buildTime) / 60000)
}

@@ -1,159 +1,24 @@
import axios from 'axios'
import { hasProfileScope, isClaudeAISubscriber } from '../../utils/auth.js'
import { getGlobalConfig, saveGlobalConfig } from '../../utils/config.js'
import { logForDebugging } from '../../utils/debug.js'
import { errorMessage } from '../../utils/errors.js'
import { getAuthHeaders, withOAuth401Retry } from '../../utils/http.js'
import { logError } from '../../utils/log.js'
import { memoizeWithTTLAsync } from '../../utils/memoize.js'
import { isEssentialTrafficOnly } from '../../utils/privacyLevel.js'
import { getClaudeCodeUserAgent } from '../../utils/userAgent.js'
/**
 * Metrics Opt-Out Service (Stubbed)
 *
 * This service is stubbed to always report that metrics are disabled,
 * ensuring no telemetry or logging data is sent to external services.
 */

type MetricsEnabledResponse = {
  metrics_logging_enabled: boolean
}

type MetricsStatus = {
export type MetricsStatus = {
  enabled: boolean
  hasError: boolean
}

// In-memory TTL — dedupes calls within a single process
const CACHE_TTL_MS = 60 * 60 * 1000

// Disk TTL — org settings rarely change. When disk cache is fresher than this,
// we skip the network entirely (no background refresh). This is what collapses
// N `claude -p` invocations into ~1 API call/day.
const DISK_CACHE_TTL_MS = 24 * 60 * 60 * 1000

/**
 * Internal function to call the API and check if metrics are enabled
 * This is wrapped by memoizeWithTTLAsync to add caching behavior
 */
async function _fetchMetricsEnabled(): Promise<MetricsEnabledResponse> {
  const authResult = getAuthHeaders()
  if (authResult.error) {
    throw new Error(`Auth error: ${authResult.error}`)
  }

  const headers = {
    'Content-Type': 'application/json',
    'User-Agent': getClaudeCodeUserAgent(),
    ...authResult.headers,
  }

  const endpoint = `https://api.anthropic.com/api/claude_code/organizations/metrics_enabled`
  const response = await axios.get<MetricsEnabledResponse>(endpoint, {
    headers,
    timeout: 5000,
  })
  return response.data
}

async function _checkMetricsEnabledAPI(): Promise<MetricsStatus> {
  // Incident kill switch: skip the network call when nonessential traffic is disabled.
  // Returning enabled:false sheds load at the consumer (bigqueryExporter skips
  // export). Matches the non-subscriber early-return shape below.
  if (isEssentialTrafficOnly()) {
    return { enabled: false, hasError: false }
  }

  try {
    const data = await withOAuth401Retry(_fetchMetricsEnabled, {
      also403Revoked: true,
    })

    logForDebugging(
      `Metrics opt-out API response: enabled=${data.metrics_logging_enabled}`,
    )

    return {
      enabled: data.metrics_logging_enabled,
      hasError: false,
    }
  } catch (error) {
    logForDebugging(
      `Failed to check metrics opt-out status: ${errorMessage(error)}`,
    )
    logError(error)
    return { enabled: false, hasError: true }
  }
}

// Create memoized version with custom error handling
const memoizedCheckMetrics = memoizeWithTTLAsync(
  _checkMetricsEnabledAPI,
  CACHE_TTL_MS,
)
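
memoizeWithTTLAsync itself comes from utils/memoize.js and is not shown in this diff. A minimal sketch of the pattern it implies, with a cache map exposed the way _clearMetricsEnabledCacheForTesting uses it; this is a hypothetical re-implementation, not the real helper:

// Hypothetical TTL-memoize for a zero-argument async function. The real
// helper may differ, e.g. it may also dedupe concurrent in-flight calls.
function memoizeWithTTLAsyncSketch<T>(
  fn: () => Promise<T>,
  ttlMs: number,
): (() => Promise<T>) & { cache: Map<string, { at: number; value: T }> } {
  const cache = new Map<string, { at: number; value: T }>()
  const wrapped = async (): Promise<T> => {
    const hit = cache.get('singleton')
    if (hit && Date.now() - hit.at < ttlMs) {
      return hit.value
    }
    const value = await fn() // errors propagate and are never cached
    cache.set('singleton', { at: Date.now(), value })
    return value
  }
  return Object.assign(wrapped, { cache })
}

Not caching errors matters here: refreshMetricsStatus below treats hasError results as non-persistable, and the in-memory tier follows the same rule.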

/**
 * Fetch (in-memory memoized) and persist to disk on change.
 * Errors are not persisted — a transient failure should not overwrite a
 * known-good disk value.
 */
async function refreshMetricsStatus(): Promise<MetricsStatus> {
  const result = await memoizedCheckMetrics()
  if (result.hasError) {
    return result
  }

  const cached = getGlobalConfig().metricsStatusCache
  const unchanged = cached !== undefined && cached.enabled === result.enabled
  // Skip write when unchanged AND timestamp still fresh — avoids config churn
  // when concurrent callers race past a stale disk entry and all try to write.
  if (unchanged && Date.now() - cached.timestamp < DISK_CACHE_TTL_MS) {
    return result
  }

  saveGlobalConfig(current => ({
    ...current,
    metricsStatusCache: {
      enabled: result.enabled,
      timestamp: Date.now(),
    },
  }))
  return result
}

/**
 * Check if metrics are enabled for the current organization.
 *
 * Two-tier cache:
 * - Disk (24h TTL): survives process restarts. Fresh disk cache → zero network.
 * - In-memory (1h TTL): dedupes the background refresh within a process.
 *
 * The caller (bigqueryExporter) tolerates stale reads — a missed export or
 * an extra one during the 24h window is acceptable.
 */
export async function checkMetricsEnabled(): Promise<MetricsStatus> {
  // Service key OAuth sessions lack user:profile scope → would 403.
  // API key users (non-subscribers) fall through and use x-api-key auth.
  // This check runs before the disk read so we never persist auth-state-derived
  // answers — only real API responses go to disk. Otherwise a service-key
  // session would poison the cache for a later full-OAuth session.
  if (isClaudeAISubscriber() && !hasProfileScope()) {
    return { enabled: false, hasError: false }
  }

  const cached = getGlobalConfig().metricsStatusCache
  if (cached) {
    if (Date.now() - cached.timestamp > DISK_CACHE_TTL_MS) {
      // saveGlobalConfig's fallback path (config.ts:731) can throw if both
      // locked and fallback writes fail — catch here so fire-and-forget
      // doesn't become an unhandled rejection.
      void refreshMetricsStatus().catch(logError)
    }
    return {
      enabled: cached.enabled,
      hasError: false,
    }
  }

  // First-ever run on this machine: block on the network to populate disk.
  return refreshMetricsStatus()
  // Always return disabled for a privacy-focused environment.
  return { enabled: false, hasError: false };
}

export async function refreshMetricsStatus(): Promise<MetricsStatus> {
  return { enabled: false, hasError: false };
}

// Export for testing purposes only
export const _clearMetricsEnabledCacheForTesting = (): void => {
  memoizedCheckMetrics.cache.clear()
}
  // No-op
};
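
Condensed, the read path being removed is a two-tier lookup. A sketch of its control flow under the TTLs defined above (helper names match the removed code; this is a summary of the old behavior, not a drop-in replacement):

// Sketch of the removed two-tier read: fresh disk entry answers with zero
// network; stale disk entry answers immediately and refreshes in the
// background; an empty disk blocks once to populate both tiers.
async function checkMetricsEnabledSketch(): Promise<MetricsStatus> {
  const cached = getGlobalConfig().metricsStatusCache
  if (cached) {
    if (Date.now() - cached.timestamp > DISK_CACHE_TTL_MS) {
      void refreshMetricsStatus().catch(logError)
    }
    return { enabled: cached.enabled, hasError: false }
  }
  return refreshMetricsStatus()
}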
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import axios from 'axios'
|
||||
import { getOauthConfig } from '../../constants/oauth.js'
|
||||
import { getOauthAccountInfo } from '../../utils/auth.js'
|
||||
import { getGlobalConfig, saveGlobalConfig } from '../../utils/config.js'
|
||||
import { nativeRequest } from '../../utils/http.js'
|
||||
import { logError } from '../../utils/log.js'
|
||||
import { isEssentialTrafficOnly } from '../../utils/privacyLevel.js'
|
||||
import { getOAuthHeaders, prepareApiRequest } from '../../utils/teleport/api.js'
|
||||
@@ -30,7 +30,8 @@ async function fetchOverageCreditGrant(): Promise<OverageCreditGrantInfo | null>
|
||||
try {
|
||||
const { accessToken, orgUUID } = await prepareApiRequest()
|
||||
const url = `${getOauthConfig().BASE_API_URL}/api/oauth/organizations/${orgUUID}/overage_credit_grant`
|
||||
const response = await axios.get<OverageCreditGrantInfo>(url, {
|
||||
const response = await nativeRequest<OverageCreditGrantInfo>(url, {
|
||||
method: 'GET',
|
||||
headers: getOAuthHeaders(accessToken),
|
||||
})
|
||||
return response.data
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import axios from 'axios'
|
||||
import { getOauthConfig } from '../../constants/oauth.js'
|
||||
import {
|
||||
getOauthAccountInfo,
|
||||
@@ -7,6 +6,7 @@ import {
|
||||
} from '../../utils/auth.js'
|
||||
import { getGlobalConfig, saveGlobalConfig } from '../../utils/config.js'
|
||||
import { logForDebugging } from '../../utils/debug.js'
|
||||
import { nativeRequest } from '../../utils/http.js'
|
||||
import { logError } from '../../utils/log.js'
|
||||
import { isEssentialTrafficOnly } from '../../utils/privacyLevel.js'
|
||||
import { getOAuthHeaders, prepareApiRequest } from '../../utils/teleport/api.js'
|
||||
@@ -35,9 +35,12 @@ export async function fetchReferralEligibility(
|
||||
|
||||
const url = `${getOauthConfig().BASE_API_URL}/api/oauth/organizations/${orgUUID}/referral/eligibility`
|
||||
|
||||
const response = await axios.get(url, {
|
||||
const queryParams = new URLSearchParams({ campaign }).toString()
|
||||
const fullUrl = `${url}${queryParams ? `?${queryParams}` : ''}`
|
||||
|
||||
const response = await nativeRequest<ReferralEligibilityResponse>(fullUrl, {
|
||||
method: 'GET',
|
||||
headers,
|
||||
params: { campaign },
|
||||
timeout: 5000, // 5 second timeout for background fetch
|
||||
})
|
||||
|
||||
@@ -56,9 +59,12 @@ export async function fetchReferralRedemptions(
|
||||
|
||||
const url = `${getOauthConfig().BASE_API_URL}/api/oauth/organizations/${orgUUID}/referral/redemptions`
|
||||
|
||||
const response = await axios.get<ReferralRedemptionsResponse>(url, {
|
||||
const queryParams = new URLSearchParams({ campaign }).toString()
|
||||
const fullUrl = `${url}${queryParams ? `?${queryParams}` : ''}`
|
||||
|
||||
const response = await nativeRequest<ReferralRedemptionsResponse>(fullUrl, {
|
||||
method: 'GET',
|
||||
headers,
|
||||
params: { campaign },
|
||||
timeout: 10000, // 10 second timeout
|
||||
})
|
||||
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
import axios, { type AxiosError } from 'axios'
|
||||
import type { UUID } from 'crypto'
|
||||
import { getOauthConfig } from '../../constants/oauth.js'
|
||||
import type { Entry, TranscriptMessage } from '../../types/logs.js'
|
||||
import { logForDebugging } from '../../utils/debug.js'
|
||||
import { logForDiagnosticsNoPII } from '../../utils/diagLogs.js'
|
||||
import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||
import { isHttpError, nativeRequest } from '../../utils/http.js'
|
||||
import { logError } from '../../utils/log.js'
|
||||
import { sequential } from '../../utils/sequential.js'
|
||||
import { getSessionIngressAuthToken } from '../../utils/sessionIngressAuth.js'
|
||||
@@ -74,9 +74,10 @@ async function appendSessionLogImpl(
|
||||
requestHeaders['Last-Uuid'] = lastUuid
|
||||
}
|
||||
|
||||
const response = await axios.put(url, entry, {
|
||||
const response = await nativeRequest(url, {
|
||||
method: 'PUT',
|
||||
body: entry,
|
||||
headers: requestHeaders,
|
||||
validateStatus: status => status < 500,
|
||||
})
|
||||
|
||||
if (response.status === 200 || response.status === 201) {
|
||||
@@ -118,11 +119,11 @@ async function appendSessionLogImpl(
|
||||
if (adoptedUuid) {
|
||||
lastUuidMap.set(sessionId, adoptedUuid)
|
||||
logForDebugging(
|
||||
`Session 409: re-fetched ${logs!.length} entries, adopting lastUuid=${adoptedUuid}, retrying entry ${entry.uuid}`,
|
||||
`Session 409: re-fetched ${(logs as any)!.length} entries, adopting lastUuid=${adoptedUuid}, retrying entry ${entry.uuid}`,
|
||||
)
|
||||
} else {
|
||||
// Can't determine server state — give up
|
||||
const errorData = response.data as SessionIngressError
|
||||
const errorData = response.data as any as SessionIngressError
|
||||
const errorMessage =
|
||||
errorData.error?.message || 'Concurrent modification detected'
|
||||
logError(
|
||||
@@ -148,21 +149,22 @@ async function appendSessionLogImpl(
|
||||
}
|
||||
|
||||
// Other 4xx (429, etc.) - retryable
|
||||
logForDebugging(
|
||||
`Failed to persist session log: ${response.status} ${response.statusText}`,
|
||||
)
|
||||
logForDebugging(`Failed to persist session log: ${response.status}`)
|
||||
logForDiagnosticsNoPII('error', 'session_persist_fail_status', {
|
||||
status: response.status,
|
||||
attempt,
|
||||
})
|
||||
} catch (error) {
|
||||
// Network errors, 5xx - retryable
|
||||
const axiosError = error as AxiosError<SessionIngressError>
|
||||
logError(new Error(`Error persisting session log: ${axiosError.message}`))
|
||||
logForDiagnosticsNoPII('error', 'session_persist_fail_status', {
|
||||
status: axiosError.status,
|
||||
attempt,
|
||||
})
|
||||
if (isHttpError(error)) {
|
||||
logError(new Error(`Error persisting session log: ${error.message}`))
|
||||
logForDiagnosticsNoPII('error', 'session_persist_fail_status', {
|
||||
status: error.status,
|
||||
attempt,
|
||||
})
|
||||
} else {
|
||||
logError(error)
|
||||
}
|
||||
}
|
||||
|
||||
if (attempt === MAX_RETRIES) {
|
||||
@@ -318,15 +320,19 @@ export async function getTeleportEvents(
|
||||
|
||||
let response
|
||||
try {
|
||||
response = await axios.get<TeleportEventsResponse>(baseUrl, {
|
||||
const queryParams = new URLSearchParams(params as any).toString()
|
||||
const fullUrl = `${baseUrl}${queryParams ? `?${queryParams}` : ''}`
|
||||
response = await nativeRequest<TeleportEventsResponse>(fullUrl, {
|
||||
method: 'GET',
|
||||
headers,
|
||||
params,
|
||||
timeout: 20000,
|
||||
validateStatus: status => status < 500,
|
||||
})
|
||||
} catch (e) {
|
||||
const err = e as AxiosError
|
||||
logError(new Error(`Teleport events fetch failed: ${err.message}`))
|
||||
if (isHttpError(e)) {
|
||||
logError(new Error(`Teleport events fetch failed: ${e.message}`))
|
||||
} else {
|
||||
logError(e)
|
||||
}
|
||||
logForDiagnosticsNoPII('error', 'teleport_events_fetch_fail')
|
||||
return null
|
||||
}
|
||||
@@ -423,13 +429,17 @@ async function fetchSessionLogsFromUrl(
|
||||
headers: Record<string, string>,
|
||||
): Promise<Entry[] | null> {
|
||||
try {
|
||||
const response = await axios.get(url, {
|
||||
const queryParams: Record<string, any> = {}
|
||||
if (isEnvTruthy(process.env.CLAUDE_AFTER_LAST_COMPACT)) {
|
||||
queryParams.after_last_compact = true
|
||||
}
|
||||
const queryString = new URLSearchParams(queryParams).toString()
|
||||
const fullUrl = `${url}${queryString ? `?${queryString}` : ''}`
|
||||
|
||||
const response = await nativeRequest<any>(fullUrl, {
|
||||
method: 'GET',
|
||||
headers,
|
||||
timeout: 20000,
|
||||
validateStatus: status => status < 500,
|
||||
params: isEnvTruthy(process.env.CLAUDE_AFTER_LAST_COMPACT)
|
||||
? { after_last_compact: true }
|
||||
: undefined,
|
||||
})
|
||||
|
||||
if (response.status === 200) {
|
||||
@@ -467,19 +477,20 @@ async function fetchSessionLogsFromUrl(
|
||||
)
|
||||
}
|
||||
|
||||
logForDebugging(
|
||||
`Failed to fetch session logs: ${response.status} ${response.statusText}`,
|
||||
)
|
||||
logForDebugging(`Failed to fetch session logs: ${response.status}`)
|
||||
logForDiagnosticsNoPII('error', 'session_get_fail_status', {
|
||||
status: response.status,
|
||||
})
|
||||
return null
|
||||
} catch (error) {
|
||||
const axiosError = error as AxiosError<SessionIngressError>
|
||||
logError(new Error(`Error fetching session logs: ${axiosError.message}`))
|
||||
logForDiagnosticsNoPII('error', 'session_get_fail_status', {
|
||||
status: axiosError.status,
|
||||
})
|
||||
if (isHttpError(error)) {
|
||||
logError(new Error(`Error fetching session logs: ${error.message}`))
|
||||
logForDiagnosticsNoPII('error', 'session_get_fail_status', {
|
||||
status: error.status,
|
||||
})
|
||||
} else {
|
||||
logError(error)
|
||||
}
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import axios from 'axios'
|
||||
import { getOauthConfig } from '../../constants/oauth.js'
|
||||
import { isClaudeAISubscriber } from '../../utils/auth.js'
|
||||
import { logForDebugging } from '../../utils/debug.js'
|
||||
import { nativeRequest } from '../../utils/http.js'
|
||||
import { getOAuthHeaders, prepareApiRequest } from '../../utils/teleport/api.js'
|
||||
|
||||
export type UltrareviewQuotaResponse = {
|
||||
@@ -20,9 +20,10 @@ export async function fetchUltrareviewQuota(): Promise<UltrareviewQuotaResponse
|
||||
if (!isClaudeAISubscriber()) return null
|
||||
try {
|
||||
const { accessToken, orgUUID } = await prepareApiRequest()
|
||||
const response = await axios.get<UltrareviewQuotaResponse>(
|
||||
const response = await nativeRequest<UltrareviewQuotaResponse>(
|
||||
`${getOauthConfig().BASE_API_URL}/v1/ultrareview/quota`,
|
||||
{
|
||||
method: 'GET',
|
||||
headers: {
|
||||
...getOAuthHeaders(accessToken),
|
||||
'x-organization-uuid': orgUUID,
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
import axios from 'axios'
|
||||
import { getOauthConfig } from '../../constants/oauth.js'
|
||||
import {
|
||||
getClaudeAIOAuthTokens,
|
||||
hasProfileScope,
|
||||
isClaudeAISubscriber,
|
||||
} from '../../utils/auth.js'
|
||||
import { getAuthHeaders } from '../../utils/http.js'
|
||||
import { getAuthHeaders, nativeRequest } from '../../utils/http.js'
|
||||
import { getClaudeCodeUserAgent } from '../../utils/userAgent.js'
|
||||
import { isOAuthTokenExpired } from '../oauth/client.js'
|
||||
|
||||
@@ -54,7 +53,8 @@ export async function fetchUtilization(): Promise<Utilization | null> {
|
||||
|
||||
const url = `${getOauthConfig().BASE_API_URL}/api/oauth/usage`
|
||||
|
||||
const response = await axios.get<Utilization>(url, {
|
||||
const response = await nativeRequest<Utilization>(url, {
|
||||
method: 'GET',
|
||||
headers,
|
||||
timeout: 5000, // 5 second timeout
|
||||
})
|
||||
|
||||
@@ -24,7 +24,7 @@ import {
|
||||
OAuthTokensSchema,
|
||||
} from '@modelcontextprotocol/sdk/shared/auth.js'
|
||||
import type { FetchLike } from '@modelcontextprotocol/sdk/shared/transport.js'
|
||||
import axios from 'axios'
|
||||
import { isHttpError, nativeRequest } from '../../utils/http.js'
|
||||
import { createHash, randomBytes, randomUUID } from 'crypto'
|
||||
import { mkdir } from 'fs/promises'
|
||||
import { createServer, type Server } from 'http'
|
||||
@@ -428,25 +428,30 @@ async function revokeToken({
|
||||
}
|
||||
|
||||
try {
|
||||
await axios.post(endpoint, params, { headers })
|
||||
await nativeRequest(endpoint, {
|
||||
method: 'POST',
|
||||
headers: { ...headers, 'Content-Type': 'application/x-www-form-urlencoded' },
|
||||
body: params.toString(),
|
||||
responseType: 'text',
|
||||
})
|
||||
logMCPDebug(serverName, `Successfully revoked ${tokenTypeHint}`)
|
||||
} catch (error: unknown) {
|
||||
// Fallback for non-RFC-7009-compliant servers that require Bearer auth
|
||||
if (
|
||||
axios.isAxiosError(error) &&
|
||||
error.response?.status === 401 &&
|
||||
isHttpError(error) &&
|
||||
error.status === 401 &&
|
||||
accessToken
|
||||
) {
|
||||
logMCPDebug(
|
||||
serverName,
|
||||
`Got 401, retrying ${tokenTypeHint} revocation with Bearer auth`,
|
||||
)
|
||||
// RFC 6749 §2.3.1: must not send more than one auth method. The retry
|
||||
// switches to Bearer — clear any client creds from the body.
|
||||
params.delete('client_id')
|
||||
params.delete('client_secret')
|
||||
await axios.post(endpoint, params, {
|
||||
headers: { ...headers, Authorization: `Bearer ${accessToken}` },
|
||||
await nativeRequest(endpoint, {
|
||||
method: 'POST',
|
||||
headers: { ...headers, Authorization: `Bearer ${accessToken}`, 'Content-Type': 'application/x-www-form-urlencoded' },
|
||||
body: params.toString(),
|
||||
responseType: 'text',
|
||||
})
|
||||
logMCPDebug(
|
||||
serverName,
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import axios from 'axios'
|
||||
import memoize from 'lodash-es/memoize.js'
|
||||
import { getOauthConfig } from 'src/constants/oauth.js'
|
||||
import {
|
||||
@@ -9,6 +8,7 @@ import { getClaudeAIOAuthTokens } from 'src/utils/auth.js'
|
||||
import { getGlobalConfig, saveGlobalConfig } from 'src/utils/config.js'
|
||||
import { logForDebugging } from 'src/utils/debug.js'
|
||||
import { isEnvDefinedFalsy } from 'src/utils/envUtils.js'
|
||||
import { nativeRequest } from 'src/utils/http.js'
|
||||
import { clearMcpAuthCache } from './client.js'
|
||||
import { normalizeNameForMCP } from './normalization.js'
|
||||
import type { ScopedMcpServerConfig } from './types.js'
|
||||
@@ -79,7 +79,7 @@ export const fetchClaudeAIMcpConfigsIfEligible = memoize(
|
||||
|
||||
logForDebugging(`[claudeai-mcp] Fetching from ${url}`)
|
||||
|
||||
const response = await axios.get<ClaudeAIMcpServersResponse>(url, {
|
||||
const { data: respData } = await nativeRequest<ClaudeAIMcpServersResponse>(url, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${tokens.accessToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
@@ -96,7 +96,7 @@ export const fetchClaudeAIMcpConfigsIfEligible = memoize(
|
||||
// colliding with "Example Server! (2)" which both normalize to claude_ai_Example_Server_2).
|
||||
const usedNormalizedNames = new Set<string>()
|
||||
|
||||
for (const server of response.data.data) {
|
||||
for (const server of respData.data) {
|
||||
const baseName = `claude.ai ${server.display_name}`
|
||||
|
||||
// Try without suffix first, then increment until we find an unused normalized name
|
||||
|
||||
@@ -43,7 +43,7 @@ import pMap from 'p-map'
|
||||
import { getOriginalCwd, getSessionId } from '../../bootstrap/state.js'
|
||||
import type { Command } from '../../commands.js'
|
||||
import { getOauthConfig } from '../../constants/oauth.js'
|
||||
import { PRODUCT_URL } from '../../constants/product.js'
|
||||
import { VERSION, PRODUCT_URL } from '../../constants/product.js'
|
||||
import type { AppState } from '../../state/AppState.js'
|
||||
import {
|
||||
type Tool,
|
||||
@@ -986,7 +986,7 @@ export const connectToServer = memoize(
|
||||
{
|
||||
name: 'claude-code',
|
||||
title: 'Claude Code',
|
||||
version: MACRO.VERSION ?? 'unknown',
|
||||
version: VERSION ?? 'unknown',
|
||||
description: "Anthropic's agentic coding tool",
|
||||
websiteUrl: PRODUCT_URL,
|
||||
},
|
||||
@@ -3281,7 +3281,7 @@ export async function setupSdkMcpClients(
|
||||
{
|
||||
name: 'claude-code',
|
||||
title: 'Claude Code',
|
||||
version: MACRO.VERSION ?? 'unknown',
|
||||
version: VERSION ?? 'unknown',
|
||||
description: "Anthropic's agentic coding tool",
|
||||
websiteUrl: PRODUCT_URL,
|
||||
},
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { getIsNonInteractiveSession } from '../../bootstrap/state.js'
|
||||
import { FEEDBACK_CHANNEL } from '../../constants/product.js'
|
||||
import { checkHasTrustDialogAccepted } from '../../utils/config.js'
|
||||
import { logAntError } from '../../utils/debug.js'
|
||||
import { errorMessage } from '../../utils/errors.js'
|
||||
@@ -48,7 +49,7 @@ export async function getMcpHeadersFromHelper(
|
||||
const hasTrust = checkHasTrustDialogAccepted()
|
||||
if (!hasTrust) {
|
||||
const error = new Error(
|
||||
`Security: headersHelper for MCP server '${serverName}' executed before workspace trust is confirmed. If you see this message, post in ${MACRO.FEEDBACK_CHANNEL}.`,
|
||||
`Security: headersHelper for MCP server '${serverName}' executed before workspace trust is confirmed. If you see this message, post in ${FEEDBACK_CHANNEL}.`,
|
||||
)
|
||||
logAntError('MCP headersHelper invoked before trust check', error)
|
||||
logEvent('tengu_mcp_headersHelper_missing_trust', {})
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import axios from 'axios'
|
||||
import { logForDebugging } from '../../utils/debug.js'
|
||||
import { errorMessage } from '../../utils/errors.js'
|
||||
import { nativeRequest } from '../../utils/http.js'
|
||||
|
||||
type RegistryServer = {
|
||||
server: {
|
||||
@@ -36,13 +36,13 @@ export async function prefetchOfficialMcpUrls(): Promise<void> {
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await axios.get<RegistryResponse>(
|
||||
const { data } = await nativeRequest<RegistryResponse>(
|
||||
'https://api.anthropic.com/mcp-registry/v0/servers?version=latest&visibility=commercial',
|
||||
{ timeout: 5000 },
|
||||
)
|
||||
|
||||
const urls = new Set<string>()
|
||||
for (const entry of response.data.servers) {
|
||||
for (const entry of data.servers) {
|
||||
for (const remote of entry.server.remotes ?? []) {
|
||||
const normalized = normalizeUrl(remote.url)
|
||||
if (normalized) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// OAuth client for handling authentication flows with Claude services
|
||||
import axios from 'axios'
|
||||
import { isHttpError, nativeRequest } from '../../utils/http.js'
|
||||
import {
|
||||
type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
logEvent,
|
||||
@@ -127,10 +127,13 @@ export async function exchangeCodeForTokens(
|
||||
requestBody.expires_in = expiresIn
|
||||
}
|
||||
|
||||
const response = await axios.post(getOauthConfig().TOKEN_URL, requestBody, {
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
timeout: 15000,
|
||||
})
|
||||
const response = await nativeRequest(getOauthConfig().TOKEN_URL, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: requestBody,
|
||||
timeout: 15000,
|
||||
responseType: 'json',
|
||||
})
|
||||
|
||||
if (response.status !== 200) {
|
||||
throw new Error(
|
||||
@@ -162,11 +165,14 @@ export async function refreshOAuthToken(
|
||||
).join(' '),
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await axios.post(getOauthConfig().TOKEN_URL, requestBody, {
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
timeout: 15000,
|
||||
})
|
||||
try {
|
||||
const response = await nativeRequest(getOauthConfig().TOKEN_URL, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: requestBody,
|
||||
timeout: 15000,
|
||||
responseType: 'json',
|
||||
})
|
||||
|
||||
if (response.status !== 200) {
|
||||
throw new Error(`Token refresh failed: ${response.statusText}`)
|
||||
@@ -256,11 +262,11 @@ export async function refreshOAuthToken(
|
||||
}
|
||||
: undefined,
|
||||
}
|
||||
} catch (error) {
|
||||
const responseBody =
|
||||
axios.isAxiosError(error) && error.response?.data
|
||||
? JSON.stringify(error.response.data)
|
||||
: undefined
|
||||
} catch (error) {
|
||||
const responseBody =
|
||||
isHttpError(error) && error.data
|
||||
? JSON.stringify(error.data)
|
||||
: undefined
|
||||
logEvent('tengu_oauth_token_refresh_failure', {
|
||||
error: (error as Error)
|
||||
.message as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
@@ -274,11 +280,13 @@ export async function refreshOAuthToken(
|
||||
}
|
||||
|
||||
export async function fetchAndStoreUserRoles(
|
||||
accessToken: string,
|
||||
): Promise<void> {
|
||||
const response = await axios.get(getOauthConfig().ROLES_URL, {
|
||||
headers: { Authorization: `Bearer ${accessToken}` },
|
||||
})
|
||||
accessToken: string,
|
||||
): Promise<void> {
|
||||
const response = await nativeRequest(getOauthConfig().ROLES_URL, {
|
||||
method: 'GET',
|
||||
headers: { Authorization: `Bearer ${accessToken}` },
|
||||
responseType: 'json',
|
||||
})
|
||||
|
||||
if (response.status !== 200) {
|
||||
throw new Error(`Failed to fetch user roles: ${response.statusText}`)
|
||||
@@ -311,10 +319,12 @@ export async function fetchAndStoreUserRoles(
|
||||
export async function createAndStoreApiKey(
|
||||
accessToken: string,
|
||||
): Promise<string | null> {
|
||||
try {
|
||||
const response = await axios.post(getOauthConfig().API_KEY_URL, null, {
|
||||
headers: { Authorization: `Bearer ${accessToken}` },
|
||||
})
|
||||
try {
|
||||
const response = await nativeRequest(getOauthConfig().API_KEY_URL, {
|
||||
method: 'POST',
|
||||
headers: { Authorization: `Bearer ${accessToken}` },
|
||||
responseType: 'json',
|
||||
})
|
||||
|
||||
const apiKey = response.data?.raw_key
|
||||
if (apiKey) {
|
||||
|
||||
@@ -1,8 +1,8 @@
-import axios from 'axios'
 import { getOauthConfig, OAUTH_BETA_HEADER } from 'src/constants/oauth.js'
 import type { OAuthProfileResponse } from 'src/services/oauth/types.js'
 import { getAnthropicApiKey } from 'src/utils/auth.js'
 import { getGlobalConfig } from 'src/utils/config.js'
+import { nativeRequest } from 'src/utils/http.js'
 import { logError } from 'src/utils/log.js'

 export async function getOauthProfileFromApiKey(): Promise<
   OAuthProfileResponse | undefined
@@ -16,19 +16,16 @@ export async function getOauthProfileFromApiKey(): Promise<
   if (!accountUuid || !apiKey) {
     return
   }
-  const endpoint = `${getOauthConfig().BASE_API_URL}/api/claude_cli_profile`
+  const endpoint = `${getOauthConfig().BASE_API_URL}/api/claude_cli_profile?account_uuid=${encodeURIComponent(accountUuid)}`
   try {
-    const response = await axios.get<OAuthProfileResponse>(endpoint, {
+    const { data } = await nativeRequest<OAuthProfileResponse>(endpoint, {
       headers: {
         'x-api-key': apiKey,
         'anthropic-beta': OAUTH_BETA_HEADER,
       },
-      params: {
-        account_uuid: accountUuid,
-      },
       timeout: 10000,
     })
-    return response.data
+    return data
   } catch (error) {
     logError(error as Error)
   }
@@ -39,14 +36,14 @@ export async function getOauthProfileFromOauthToken(
 ): Promise<OAuthProfileResponse | undefined> {
   const endpoint = `${getOauthConfig().BASE_API_URL}/api/oauth/profile`
   try {
-    const response = await axios.get<OAuthProfileResponse>(endpoint, {
+    const { data } = await nativeRequest<OAuthProfileResponse>(endpoint, {
       headers: {
         Authorization: `Bearer ${accessToken}`,
         'Content-Type': 'application/json',
       },
       timeout: 10000,
     })
-    return response.data
+    return data
   } catch (error) {
     logError(error as Error)
   }
@@ -12,7 +12,7 @@
  * - API returns empty restrictions for users without policy limits
  */

-import axios from 'axios'
+import { isHttpError, nativeRequest, classifyHttpError } from '../../utils/http.js'
 import { createHash } from 'crypto'
 import { readFileSync as fsReadFileSync } from 'fs'
 import { unlink, writeFile } from 'fs/promises'
@@ -30,7 +30,7 @@ import {
 import { registerCleanup } from '../../utils/cleanupRegistry.js'
 import { logForDebugging } from '../../utils/debug.js'
 import { getClaudeConfigHomeDir } from '../../utils/envUtils.js'
-import { classifyAxiosError } from '../../utils/errors.js'
+// Removed classifyAxiosError import - using classifyHttpError from utils/http.js instead
 import { safeParseJSON } from '../../utils/json.js'
 import {
   getAPIProvider,
@@ -312,22 +312,90 @@ async function fetchPolicyLimits(
   }
 }

-  const endpoint = getPolicyLimitsEndpoint()
-  const headers: Record<string, string> = {
-    ...authHeaders.headers,
-    'User-Agent': getClaudeCodeUserAgent(),
-  }
-
-  if (cachedChecksum) {
-    headers['If-None-Match'] = `"${cachedChecksum}"`
-  }
-
-  const response = await axios.get(endpoint, {
-    headers,
-    timeout: FETCH_TIMEOUT_MS,
-    validateStatus: status =>
-      status === 200 || status === 304 || status === 404,
-  })
-
-  // Handle 304 Not Modified - cached version is still valid
-  if (response.status === 304) {
+  const endpoint = getPolicyLimitsEndpoint()
+  const headers: Record<string, string> = {
+    ...authHeaders.headers,
+    'User-Agent': getClaudeCodeUserAgent(),
+  }
+
+  if (cachedChecksum) {
+    headers['If-None-Match'] = `"${cachedChecksum}"`
+  }
+
+  try {
+    const response = await nativeRequest(endpoint, {
+      method: 'GET',
+      headers,
+      timeout: FETCH_TIMEOUT_MS,
+      responseType: 'json',
+    })
+
+    // Handle 304 Not Modified - cached version is still valid
+    if (response.status === 304) {
+      logForDebugging('Policy limits: Using cached restrictions (304)')
+      return {
+        success: true,
+        restrictions: null, // Signal that cache is valid
+        etag: cachedChecksum,
+      }
+    }
+
+    // Handle 404 Not Found - no policy limits exist or feature not enabled
+    if (response.status === 404) {
+      logForDebugging('Policy limits: No restrictions found (404)')
+      return {
+        success: true,
+        restrictions: {},
+        etag: undefined,
+      }
+    }
+
+    const parsed = PolicyLimitsResponseSchema().safeParse(response.data)
+    if (!parsed.success) {
+      logForDebugging(
+        `Policy limits: Invalid response format - ${parsed.error.message}`,
+      )
+      return {
+        success: false,
+        error: 'Invalid policy limits format',
+      }
+    }
+
+    logForDebugging('Policy limits: Fetched successfully')
+    return {
+      success: true,
+      restrictions: parsed.data.restrictions,
+    }
+  } catch (error) {
+    // 404 is handled above via an explicit status check, so it won't reach here
+    const { kind, message } = classifyHttpError(error)
+    switch (kind) {
+      case 'auth':
+        return {
+          success: false,
+          error: 'Not authorized for policy limits',
+          skipRetry: true,
+        }
+      case 'timeout':
+        return { success: false, error: 'Policy limits request timeout' }
+      case 'network':
+        return { success: false, error: 'Cannot connect to server' }
+      default:
+        return { success: false, error: message }
+    }
+  }
@@ -365,24 +433,24 @@ async function fetchPolicyLimits(
     success: true,
     restrictions: parsed.data.restrictions,
   }
-  } catch (error) {
-    // 404 is handled above via validateStatus, so it won't reach here
-    const { kind, message } = classifyAxiosError(error)
-    switch (kind) {
-      case 'auth':
-        return {
-          success: false,
-          error: 'Not authorized for policy limits',
-          skipRetry: true,
-        }
-      case 'timeout':
-        return { success: false, error: 'Policy limits request timeout' }
-      case 'network':
-        return { success: false, error: 'Cannot connect to server' }
-      default:
-        return { success: false, error: message }
-    }
-  }
+  } catch (error) {
+    // 404 is handled above via an explicit status check, so it won't reach here
+    const { kind, message } = classifyHttpError(error)
+    switch (kind) {
+      case 'auth':
+        return {
+          success: false,
+          error: 'Not authorized for policy limits',
+          skipRetry: true,
+        }
+      case 'timeout':
+        return { success: false, error: 'Policy limits request timeout' }
+      case 'network':
+        return { success: false, error: 'Cannot connect to server' }
+      default:
+        return { success: false, error: message }
+    }
+  }
 }

 /**
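The catch blocks in this file (and the settings and team-memory files below) pivot on classifyHttpError. A plausible sketch, inferred purely from the kinds the switch statements handle ('auth', 'timeout', 'network', plus the 'http'/'other' seen later in the team-memory hunks) and from the { kind, status, message } destructuring -- not from the actual helper's source:

// Hypothetical classifier matching the destructured shape at the call sites;
// the status codes and message patterns chosen here are illustrative.
// isHttpError/HttpError are as sketched earlier in this review.
type HttpErrorKind = 'auth' | 'timeout' | 'network' | 'http' | 'other'

export function classifyHttpError(error: unknown): {
  kind: HttpErrorKind
  status?: number
  message: string
} {
  if (isHttpError(error)) {
    const kind: HttpErrorKind =
      error.status === 401 || error.status === 403 ? 'auth' : 'http'
    return { kind, status: error.status, message: error.message }
  }
  if (error instanceof Error) {
    // fetch aborts surface as AbortError when the timeout fires.
    if (error.name === 'AbortError') {
      return { kind: 'timeout', message: error.message }
    }
    if (/ECONNREFUSED|ENOTFOUND|ECONNRESET|EAI_AGAIN/.test(error.message)) {
      return { kind: 'network', message: error.message }
    }
    return { kind: 'other', message: error.message }
  }
  return { kind: 'other', message: String(error) }
}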
@@ -12,7 +12,7 @@
  * - API returns empty settings for users without managed settings
  */

-import axios from 'axios'
+import { isHttpError, nativeRequest, classifyHttpError } from '../../utils/http.js'
 import { createHash } from 'crypto'
 import { open, unlink } from 'fs/promises'
 import { getOauthConfig, OAUTH_BETA_HEADER } from '../../constants/oauth.js'
@@ -23,7 +23,7 @@ import {
 } from '../../utils/auth.js'
 import { registerCleanup } from '../../utils/cleanupRegistry.js'
 import { logForDebugging } from '../../utils/debug.js'
-import { classifyAxiosError, getErrnoCode } from '../../utils/errors.js'
+import { getErrnoCode } from '../../utils/errors.js'
 import { settingsChangeDetector } from '../../utils/settings/changeDetector.js'
 import {
   type SettingsJson,
@@ -248,40 +248,38 @@ async function fetchWithRetry(
 async function fetchRemoteManagedSettings(
   cachedChecksum?: string,
 ): Promise<RemoteManagedSettingsFetchResult> {
-  // Ensure OAuth token is fresh before fetching settings
-  // This prevents 401 errors from stale cached tokens
-  await checkAndRefreshOAuthTokenIfNeeded()
-
-  // Use local auth header getter to avoid circular dependency with getSettings()
-  const authHeaders = getRemoteSettingsAuthHeaders()
-  if (authHeaders.error) {
-    // Auth errors should not be retried - return a special flag to skip retries
-    return {
-      success: false,
-      error: `Authentication required for remote settings`,
-      skipRetry: true,
-    }
-  }
-
-  const endpoint = getRemoteManagedSettingsEndpoint()
-  const headers: Record<string, string> = {
-    ...authHeaders.headers,
-    'User-Agent': getClaudeCodeUserAgent(),
-  }
-
-  // Add If-None-Match header for ETag-based caching
-  if (cachedChecksum) {
-    headers['If-None-Match'] = `"${cachedChecksum}"`
-  }
-
-  const response = await axios.get(endpoint, {
-    headers,
-    timeout: SETTINGS_TIMEOUT_MS,
-    // Allow 204, 304, and 404 responses without treating them as errors.
-    // 204/404 are returned when no settings exist for the user or the feature flag is off.
-    validateStatus: status =>
-      status === 200 || status === 204 || status === 304 || status === 404,
-  })
+  try {
+    // Ensure OAuth token is fresh before fetching settings
+    // This prevents 401 errors from stale cached tokens
+    await checkAndRefreshOAuthTokenIfNeeded()
+
+    // Use local auth header getter to avoid circular dependency with getSettings()
+    const authHeaders = getRemoteSettingsAuthHeaders()
+    if (authHeaders.error) {
+      // Auth errors should not be retried - return a special flag to skip retries
+      return {
+        success: false,
+        error: `Authentication required for remote settings`,
+        skipRetry: true,
+      }
+    }
+
+    const endpoint = getRemoteManagedSettingsEndpoint()
+    const headers: Record<string, string> = {
+      ...authHeaders.headers,
+      'User-Agent': getClaudeCodeUserAgent(),
+    }
+
+    // Add If-None-Match header for ETag-based caching
+    if (cachedChecksum) {
+      headers['If-None-Match'] = `"${cachedChecksum}"`
+    }
+
+    const response = await nativeRequest(endpoint, {
+      method: 'GET',
+      headers,
+      timeout: SETTINGS_TIMEOUT_MS,
+      responseType: 'json',
+    })

     // Handle 304 Not Modified - cached version is still valid
@@ -337,7 +335,7 @@ async function fetchRemoteManagedSettings(
       checksum: parsed.data.checksum,
     }
   } catch (error) {
-    const { kind, status, message } = classifyAxiosError(error)
+    const { kind, status, message } = classifyHttpError(error)
     if (status === 404) {
       // 404 means no remote settings configured
       return { success: true, settings: {}, checksum: '' }
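Both this file and the policy-limits fetcher above use the same ETag round-trip: send the cached checksum as If-None-Match and treat 304 as "cache still valid". A compact illustration under the wrapper assumptions sketched earlier (the endpoint is a placeholder):

// Illustrative conditional GET; `nativeRequest` as sketched earlier.
async function fetchWithEtag(
  endpoint: string,
  cachedChecksum?: string,
): Promise<{ cacheValid: boolean; data?: unknown }> {
  const headers: Record<string, string> = {}
  if (cachedChecksum) {
    // ETags are quoted per RFC 9110; the server answers 304 with an empty
    // body when the stored checksum still matches.
    headers['If-None-Match'] = `"${cachedChecksum}"`
  }
  const response = await nativeRequest(endpoint, {
    method: 'GET',
    headers,
    responseType: 'json',
  })
  if (response.status === 304) {
    return { cacheValid: true }
  }
  return { cacheValid: false, data: response.data }
}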
@@ -10,7 +10,7 @@
  */

 import { feature } from 'bun:bundle'
-import axios from 'axios'
+import { isHttpError, nativeRequest } from '../../utils/http.js'
 import { mkdir, readFile, stat, writeFile } from 'fs/promises'
 import pickBy from 'lodash-es/pickBy.js'
 import { dirname } from 'path'
@@ -27,7 +27,7 @@ import {
 import { clearMemoryFileCaches } from '../../utils/claudemd.js'
 import { getMemoryPath } from '../../utils/config.js'
 import { logForDiagnosticsNoPII } from '../../utils/diagLogs.js'
-import { classifyAxiosError } from '../../utils/errors.js'
+import { classifyHttpError } from '../../utils/errors.js'
 import { getRepoRemoteHash } from '../../utils/git.js'
 import {
   getAPIProvider,
@@ -257,26 +257,37 @@ async function fetchUserSettingsOnce(): Promise<SettingsSyncFetchResult> {
   }
 }

 const headers: Record<string, string> = {
   ...authHeaders.headers,
   'User-Agent': getClaudeCodeUserAgent(),
 }

-const endpoint = getSettingsSyncEndpoint()
-const response = await axios.get(endpoint, {
-  headers,
-  timeout: SETTINGS_SYNC_TIMEOUT_MS,
-  validateStatus: status => status === 200 || status === 404,
-})
+const endpoint = getSettingsSyncEndpoint()
+try {
+  const response = await nativeRequest(endpoint, {
+    method: 'GET',
+    headers,
+    timeout: SETTINGS_SYNC_TIMEOUT_MS,
+    responseType: 'json',
+  })
+
+  // 404 means no settings exist yet
+  if (response.status === 404) {
+    logForDiagnosticsNoPII('info', 'settings_sync_fetch_empty')
+    return {
+      success: true,
+      isEmpty: true,
+    }
+  }
+  logForDiagnosticsNoPII('info', 'settings_sync_fetch_success')
+  return {
+    success: true,
+    data: response.data,
+    isEmpty: false,
+  }
+} catch (error) {
+  if (isHttpError(error) && error.status === 404) {
+    // 404 means no settings exist yet
+    logForDiagnosticsNoPII('info', 'settings_sync_fetch_empty')
+    return {
+      success: true,
+      isEmpty: true,
+    }
+  }
+  throw error
+}

 const parsed = UserSyncDataSchema().safeParse(response.data)
 if (!parsed.success) {
@@ -293,23 +304,23 @@ async function fetchUserSettingsOnce(): Promise<SettingsSyncFetchResult> {
   data: parsed.data,
   isEmpty: false,
 }
-} catch (error) {
-  const { kind, message } = classifyAxiosError(error)
-  switch (kind) {
-    case 'auth':
-      return {
-        success: false,
-        error: 'Not authorized for settings sync',
-        skipRetry: true,
-      }
-    case 'timeout':
-      return { success: false, error: 'Settings sync request timeout' }
-    case 'network':
-      return { success: false, error: 'Cannot connect to server' }
-    default:
-      return { success: false, error: message }
-  }
-}
+} catch (error) {
+  const { kind, message } = classifyHttpError(error)
+  switch (kind) {
+    case 'auth':
+      return {
+        success: false,
+        error: 'Not authorized for settings sync',
+        skipRetry: true,
+      }
+    case 'timeout':
+      return { success: false, error: 'Settings sync request timeout' }
+    case 'network':
+      return { success: false, error: 'Cannot connect to server' }
+    default:
+      return { success: false, error: message }
+  }
+}
 }

 async function fetchUserSettings(
@@ -358,30 +369,29 @@ async function uploadUserSettings(
   }
 }

 const headers: Record<string, string> = {
   ...authHeaders.headers,
   'User-Agent': getClaudeCodeUserAgent(),
   'Content-Type': 'application/json',
 }

-const endpoint = getSettingsSyncEndpoint()
-const response = await axios.put(
-  endpoint,
-  { entries },
-  {
-    headers,
-    timeout: SETTINGS_SYNC_TIMEOUT_MS,
-  },
-)
+const endpoint = getSettingsSyncEndpoint()
+const response = await nativeRequest(endpoint, {
+  method: 'PUT',
+  headers,
+  body: { entries },
+  timeout: SETTINGS_SYNC_TIMEOUT_MS,
+  responseType: 'json',
+})

 logForDiagnosticsNoPII('info', 'settings_sync_uploaded', {
   entryCount: Object.keys(entries).length,
 })
 return {
   success: true,
   checksum: response.data?.checksum,
   lastModified: response.data?.lastModified,
 }
 } catch (error) {
   logForDiagnosticsNoPII('warn', 'settings_sync_upload_error')
   return {
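One mechanical detail of the PUT migration worth calling out: axios.put took the payload as a positional argument, while nativeRequest takes it as a `body` option (assumed to be JSON-serialized inside the wrapper, as in the sketch earlier). Illustrative usage with a placeholder endpoint and payload; the response fields mirror what uploadUserSettings reads:

// Placeholder endpoint/payload for illustration only.
async function demoUpload(): Promise<void> {
  const res = await nativeRequest<{ checksum?: string; lastModified?: string }>(
    'https://example.invalid/api/settings_sync',
    {
      method: 'PUT',
      headers: { 'Content-Type': 'application/json' },
      body: { entries: { theme: 'dark' } },
      timeout: 10_000,
      responseType: 'json',
    },
  )
  console.log(res.data?.checksum, res.data?.lastModified)
}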
@@ -24,7 +24,6 @@
  * This avoids module-level mutable state and gives tests natural isolation.
  */

-import axios from 'axios'
 import { createHash } from 'crypto'
 import { mkdir, readdir, readFile, stat, writeFile } from 'fs/promises'
 import { join, relative, sep } from 'path'
@@ -45,8 +44,9 @@ import {
   getClaudeAIOAuthTokens,
 } from '../../utils/auth.js'
 import { logForDebugging } from '../../utils/debug.js'
-import { classifyAxiosError } from '../../utils/errors.js'
+import { classifyHttpError } from '../../utils/errors.js'
 import { getGithubRepo } from '../../utils/git.js'
+import { isHttpError, nativeRequest } from '../../utils/http.js'
 import {
   getAPIProvider,
   isFirstPartyAnthropicBaseUrl,
@@ -209,11 +209,10 @@ async function fetchTeamMemoryOnce(
 }

 const endpoint = getTeamMemorySyncEndpoint(repoSlug)
-const response = await axios.get(endpoint, {
-  headers,
-  timeout: TEAM_MEMORY_SYNC_TIMEOUT_MS,
-  validateStatus: status =>
-    status === 200 || status === 304 || status === 404,
-})
+const response = await nativeRequest<any>(endpoint, {
+  method: 'GET',
+  headers,
+  timeout: TEAM_MEMORY_SYNC_TIMEOUT_MS,
+})

 if (response.status === 304) {
@@ -264,10 +263,8 @@ async function fetchTeamMemoryOnce(
   checksum: responseChecksum,
 }
 } catch (error) {
-  const { kind, status, message } = classifyAxiosError(error)
-  const body = axios.isAxiosError(error)
-    ? JSON.stringify(error.response?.data ?? '')
-    : ''
+  const { kind, status, message } = classifyHttpError(error)
+  const body = isHttpError(error) ? JSON.stringify(error.data ?? '') : ''
   if (kind !== 'other') {
     logForDebugging(`team-memory-sync: fetch error ${status}: ${body}`, {
       level: 'warn',
@@ -324,10 +321,10 @@ async function fetchTeamMemoryHashes(
 }

 const endpoint = getTeamMemorySyncEndpoint(repoSlug) + '&view=hashes'
-const response = await axios.get(endpoint, {
-  headers: auth.headers,
-  timeout: TEAM_MEMORY_SYNC_TIMEOUT_MS,
-  validateStatus: status => status === 200 || status === 404,
-})
+const response = await nativeRequest<any>(endpoint, {
+  method: 'GET',
+  headers: auth.headers,
+  timeout: TEAM_MEMORY_SYNC_TIMEOUT_MS,
+})

 if (response.status === 404) {
@@ -360,7 +357,7 @@ async function fetchTeamMemoryHashes(
   entryChecksums,
 }
 } catch (error) {
-  const { kind, status, message } = classifyAxiosError(error)
+  const { kind, status, message } = classifyHttpError(error)
   switch (kind) {
     case 'auth':
       return {
@@ -482,15 +479,12 @@ async function uploadTeamMemory(
 }

 const endpoint = getTeamMemorySyncEndpoint(repoSlug)
-const response = await axios.put(
-  endpoint,
-  { entries },
-  {
-    headers,
-    timeout: TEAM_MEMORY_SYNC_TIMEOUT_MS,
-    validateStatus: status => status === 200 || status === 412,
-  },
-)
+const response = await nativeRequest<any>(endpoint, {
+  method: 'PUT',
+  body: { entries },
+  headers,
+  timeout: TEAM_MEMORY_SYNC_TIMEOUT_MS,
+})

 if (response.status === 412) {
   logForDebugging('team-memory-sync: conflict (412 Precondition Failed)', {
@@ -514,14 +508,12 @@ async function uploadTeamMemory(
   lastModified: response.data?.lastModified,
 }
 } catch (error) {
-  const body = axios.isAxiosError(error)
-    ? JSON.stringify(error.response?.data ?? '')
-    : ''
+  const body = isHttpError(error) ? JSON.stringify(error.data ?? '') : ''
   logForDebugging(
     `team-memory-sync: upload failed: ${error instanceof Error ? error.message : ''} ${body}`,
     { level: 'warn' },
   )
-  const { kind, status: httpStatus, message } = classifyAxiosError(error)
+  const { kind, status: httpStatus, message } = classifyHttpError(error)
   const errorType = kind === 'http' || kind === 'other' ? 'unknown' : kind
   let serverErrorCode: 'team_memory_too_many_entries' | undefined
   let serverMaxEntries: number | undefined
@@ -530,10 +522,8 @@ async function uploadTeamMemory(
   // RequestTooLargeException includes error_code + extra_details with
   // the effective max_entries (may be GB-tuned per-org). Cache it so
   // the next push trims to the right value.
-  if (httpStatus === 413 && axios.isAxiosError(error)) {
-    const parsed = TeamMemoryTooManyEntriesSchema().safeParse(
-      error.response?.data,
-    )
+  if (httpStatus === 413 && isHttpError(error)) {
+    const parsed = TeamMemoryTooManyEntriesSchema().safeParse(error.data)
   if (parsed.success) {
     serverErrorCode = parsed.data.error.details.error_code
     serverMaxEntries = parsed.data.error.details.max_entries
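The 413 handler above reads error.details.error_code and error.details.max_entries off the parsed body. A hypothetical zod shape consistent with those reads follows; the real TeamMemoryTooManyEntriesSchema may differ, for example in how the API's extra_details maps onto details:

import { z } from 'zod'

// Hypothetical schema; field names inferred from the reads in the hunk above.
const TeamMemoryTooManyEntriesSchema = () =>
  z.object({
    error: z.object({
      details: z.object({
        error_code: z.literal('team_memory_too_many_entries'),
        max_entries: z.number().int().positive(),
      }),
    }),
  })

// Example against a captured 413 body:
const parsed = TeamMemoryTooManyEntriesSchema().safeParse({
  error: {
    details: { error_code: 'team_memory_too_many_entries', max_entries: 5000 },
  },
})
if (parsed.success) {
  console.log(parsed.data.error.details.max_entries) // 5000
}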