Use Logger in embedder, chronos and gql-executor
Dschoordsch committed Dec 9, 2024
1 parent 08273c1 commit 0c20475
Showing 8 changed files with 26 additions and 16 deletions.
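
Every hunk in this commit swaps a bare console.log or console.error call for the shared Logger exported from parabol-server/utils/Logger, giving the embedder, chronos, and gql-executor processes a single seam for redirecting or filtering output. The Logger implementation itself is not part of this diff; the minimal sketch below is an assumption based only on the call sites, not the actual file:

  // Hypothetical sketch — the real packages/server/utils/Logger is not shown
  // in this commit and may differ. The call sites only imply console-compatible
  // log/error methods.
  export const Logger = {
    log: (...args: unknown[]) => console.log(...args),
    error: (...args: unknown[]) => console.error(...args)
  }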
7 changes: 4 additions & 3 deletions packages/chronos/chronos.ts
@@ -18,6 +18,7 @@
 import {CronJob} from 'cron'
 import getGraphQLExecutor from 'parabol-server/utils/getGraphQLExecutor'
 import publishWebhookGQL from 'parabol-server/utils/publishWebhookGQL'
+import {Logger} from 'parabol-server/utils/Logger'
 
 interface PossibleJob {
   onTick(): void
@@ -124,13 +125,13 @@ const chronos = () => {
         cronTime: cronTime!,
         onTick
       })
-      console.log(`🌱 Chronos Job ${name}: STARTED`)
+      Logger.log(`🌱 Chronos Job ${name}: STARTED`)
     } catch {
-      console.log(`🌱 Chronos Job ${name}: SKIPPED`)
+      Logger.log(`🌱 Chronos Job ${name}: SKIPPED`)
     }
   })
 
-  console.log(`\n🌾🌾🌾 Server ID: ${SERVER_ID}. Ready for Chronos 🌾🌾🌾`)
+  Logger.log(`\n🌾🌾🌾 Server ID: ${SERVER_ID}. Ready for Chronos 🌾🌾🌾`)
 }
 
 chronos()
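
The try/catch in the hunk above is what lets chronos tolerate a job with a missing or malformed schedule: the CronJob constructor from the cron package throws on an invalid cronTime, so a bad entry is logged as SKIPPED instead of crashing startup. A standalone sketch of the same pattern, where the jobs record and its entries are illustrative assumptions rather than code from the repo:

  import {CronJob} from 'cron'
  import {Logger} from 'parabol-server/utils/Logger'

  // Illustrative only: the 'jobs' record and its entries are assumed
  const jobs: Record<string, {cronTime?: string; onTick: () => void}> = {
    exampleJob: {cronTime: process.env.CHRONOS_EXAMPLE_CRON, onTick: () => {}}
  }

  Object.entries(jobs).forEach(([name, {cronTime, onTick}]) => {
    try {
      // throws if cronTime is undefined or not a valid cron expression
      new CronJob({cronTime: cronTime!, onTick})
      Logger.log(`🌱 Chronos Job ${name}: STARTED`)
    } catch {
      Logger.log(`🌱 Chronos Job ${name}: SKIPPED`)
    }
  })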
5 changes: 3 additions & 2 deletions packages/embedder/ai_models/OpenAIGeneration.ts
@@ -4,6 +4,7 @@ import {
   GenerationModelParams,
   GenerationOptions
 } from './AbstractGenerationModel'
+import {Logger} from '../../server/utils/Logger'
 
 export type ModelId = 'gpt-3.5-turbo-0125' | 'gpt-4-turbo-preview'
 
@@ -37,7 +38,7 @@ export class OpenAIGeneration extends AbstractGenerationModel {
   async summarize(content: string, options: OpenAIGenerationOptions) {
     if (!this.openAIApi) {
       const eMsg = 'OpenAI is not configured'
-      console.log('OpenAIGenerationSummarizer.summarize(): ', eMsg)
+      Logger.log('OpenAIGenerationSummarizer.summarize(): ', eMsg)
       throw new Error(eMsg)
     }
     const {maxNewTokens: max_tokens = 512, seed, stop, temperature = 0.8, topP: top_p} = options
@@ -64,7 +65,7 @@ export class OpenAIGeneration extends AbstractGenerationModel {
       if (!maybeSummary) throw new Error('OpenAI returned empty summary')
       return maybeSummary
     } catch (e) {
-      console.log('OpenAIGenerationSummarizer.summarize(): ', e)
+      Logger.log('OpenAIGenerationSummarizer.summarize(): ', e)
       throw e
     }
   }
3 changes: 2 additions & 1 deletion packages/embedder/ai_models/TextGenerationInference.ts
@@ -4,6 +4,7 @@ import {
   GenerationOptions
 } from './AbstractGenerationModel'
 import fetchWithRetry from './helpers/fetchWithRetry'
+import {Logger} from '../../server/utils/Logger'
 
 const MAX_REQUEST_TIME_S = 3 * 60
 
@@ -52,7 +53,7 @@ export class TextGenerationInference extends AbstractGenerationModel {
         throw new Error('TextGenerationInference.summarize(): malformed response')
       return json.generated_text as string
     } catch (e) {
-      console.log('TextGenerationInferenceSummarizer.summarize(): timeout')
+      Logger.log('TextGenerationInferenceSummarizer.summarize(): timeout')
       throw e
     }
   }
8 changes: 5 additions & 3 deletions packages/embedder/ai_models/helpers/fetchWithRetry.ts
@@ -1,3 +1,5 @@
+import {Logger} from '../../../server/utils/Logger'
+
 interface FetchWithRetryOptions extends RequestInit {
   deadline: Date // Deadline for the request to complete
   debug?: boolean // Enable debug tracing
@@ -22,7 +24,7 @@ export default async (url: RequestInfo, options: FetchWithRetryOptions): Promise
     attempt++
 
     if (debug) {
-      console.log(`Attempt ${attempt}: Fetching ${JSON.stringify(url)}`)
+      Logger.log(`Attempt ${attempt}: Fetching ${JSON.stringify(url)}`)
     }
 
     const response = await fetch(url, fetchOptions)
@@ -40,7 +42,7 @@ export default async (url: RequestInfo, options: FetchWithRetryOptions): Promise
       waitTime = Math.min(waitTime, deadline.getTime() - Date.now())
 
       if (debug) {
-        console.log(
+        Logger.log(
           `Waiting ${waitTime / 1000} seconds before retrying due to status ${response.status}...`
         )
       }
@@ -54,7 +56,7 @@ export default async (url: RequestInfo, options: FetchWithRetryOptions): Promise
        throw new Error('Request aborted due to deadline')
      }
      if (debug) {
-       console.error(`Attempt ${attempt} failed: ${error}`)
+       Logger.error(`Attempt ${attempt} failed: ${error}`)
      }
      const currentTime = Date.now()
      if (currentTime >= deadline.getTime()) {
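
fetchWithRetry is the embedder's backoff helper: it retries the request with increasing waits until the caller-supplied deadline passes, and with this change its debug tracing for attempts, waits, and failures flows through Logger. A usage sketch, where the URL and request body are placeholders rather than values from the repo:

  import fetchWithRetry from './helpers/fetchWithRetry'

  const callModelServer = async () => {
    // give up after 3 minutes, mirroring MAX_REQUEST_TIME_S in the callers above
    const deadline = new Date(Date.now() + 3 * 60 * 1000)
    const res = await fetchWithRetry('http://localhost:8080/generate', {
      method: 'POST',
      deadline,
      debug: true, // attempt/backoff tracing now goes through Logger
      headers: {'Content-Type': 'application/json'},
      body: JSON.stringify({inputs: 'Summarize this discussion'})
    })
    return res.json()
  }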
5 changes: 3 additions & 2 deletions packages/embedder/debug.ts
@@ -2,6 +2,7 @@
 import '../../scripts/webpack/utils/dotenv'
 import getKysely from '../server/postgres/getKysely'
 import {WorkflowOrchestrator} from './WorkflowOrchestrator'
+import {Logger} from '../server/utils/Logger'
 
 const debugFailedJob = async () => {
   const pg = getKysely()
@@ -14,11 +15,11 @@ const debugFailedJob = async () => {
     .executeTakeFirst()
 
   if (!failedJob) {
-    console.log('No failed jobs found')
+    Logger.log('No failed jobs found')
     return
   }
 
-  console.log('Debugging job:', failedJob.id)
+  Logger.log('Debugging job:', failedJob.id)
   const orch = new WorkflowOrchestrator()
   await orch.runStep(failedJob as any)
   // const man = getModelManager()
4 changes: 3 additions & 1 deletion packages/embedder/logMemoryUse.ts
@@ -1,10 +1,12 @@
+import {Logger} from '../server/utils/Logger'
+
 // Not for use in prod, but useful for dev
 export const logMemoryUse = () => {
   const MB = 2 ** 20
   setInterval(() => {
     const memoryUsage = process.memoryUsage()
     const {rss} = memoryUsage
     const usedMB = Math.floor(rss / MB)
-    console.log('Memory use:', usedMB, 'MB')
+    Logger.log('Memory use:', usedMB, 'MB')
   }, 10000).unref()
 }
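
Note the .unref() on the interval: it keeps this dev-only sampler from holding the Node event loop open, so a worker can still exit naturally while the sampler is scheduled. Usage is a single call at startup; a sketch, with the production guard being an assumption rather than repo code:

  import {logMemoryUse} from './logMemoryUse'

  // dev-only: sample RSS every 10s without keeping the process alive
  if (process.env.NODE_ENV !== 'production') {
    logMemoryUse()
  }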
7 changes: 4 additions & 3 deletions packages/gql-executor/gqlExecutor.ts
@@ -8,6 +8,7 @@ import '../server/monkeyPatchFetch'
 import {GQLRequest} from '../server/types/custom'
 import RedisInstance from '../server/utils/RedisInstance'
 import RedisStream from './RedisStream'
+import {Logger} from '../server/utils/Logger'
 
 tracer.init({
   service: `gql`,
@@ -31,14 +32,14 @@ const run = async () => {
 
   // on shutdown, remove consumer from the group
   process.on('SIGTERM', async (signal) => {
-    console.log(`Server ID: ${SERVER_ID}. Kill signal received: ${signal}, starting graceful shutdown.`)
+    Logger.log(`Server ID: ${SERVER_ID}. Kill signal received: ${signal}, starting graceful shutdown.`)
     await publisher.xgroup(
       'DELCONSUMER',
       ServerChannel.GQL_EXECUTOR_STREAM,
       ServerChannel.GQL_EXECUTOR_CONSUMER_GROUP,
       executorChannel
     )
-    console.log(`Server ID: ${SERVER_ID}. Graceful shutdown complete, exiting.`)
+    Logger.log(`Server ID: ${SERVER_ID}. Graceful shutdown complete, exiting.`)
     process.exit()
   })
 
@@ -71,7 +72,7 @@ const run = async () => {
     ServerChannel.GQL_EXECUTOR_CONSUMER_GROUP,
     executorChannel
   )
-  console.log(`\n💧💧💧 Server ID: ${SERVER_ID}. Ready for GraphQL Execution 💧💧💧`)
+  Logger.log(`\n💧💧💧 Server ID: ${SERVER_ID}. Ready for GraphQL Execution 💧💧💧`)
 
   for await (const message of incomingStream) {
     // don't await the call below so this instance can immediately call incomingStream.next()
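
The SIGTERM handler above is a standard Redis Streams cleanup: before exiting, the executor removes itself from the consumer group with XGROUP DELCONSUMER so the group does not accumulate dead consumers. A standalone sketch of the pattern using ioredis, with placeholder stream, group, and consumer names standing in for the ServerChannel constants:

  import Redis from 'ioredis'
  import {Logger} from '../server/utils/Logger'

  const redis = new Redis()

  // placeholder names for illustration; the executor derives these from ServerChannel
  const STREAM = 'exampleStream'
  const GROUP = 'exampleGroup'
  const CONSUMER = `consumer-${process.pid}`

  process.on('SIGTERM', async (signal) => {
    Logger.log(`Kill signal received: ${signal}, starting graceful shutdown.`)
    // remove this consumer from the group before the process exits
    await redis.xgroup('DELCONSUMER', STREAM, GROUP, CONSUMER)
    Logger.log('Graceful shutdown complete, exiting.')
    process.exit()
  })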
3 changes: 2 additions & 1 deletion packages/server/hubSpot/backfillHubSpot.ts
@@ -1,6 +1,7 @@
 // call with yarn sucrase-node hubSpot/backfillHubSpot.ts
 import '../../../scripts/webpack/utils/dotenv'
 import {getUsersByEmails} from '../postgres/queries/getUsersByEmails'
+import {Logger} from '../utils/Logger'
 
 const contactKeys = {
   lastMetAt: 'last_met_at',
@@ -49,7 +50,7 @@ const upsertHubspotContact = async (
   )
   if (!String(res.status).startsWith('2')) {
     const responseBody = await res.json()
-    console.error(`Failed to update HubSpot for ${email}: `, responseBody.message)
+    Logger.error(`Failed to update HubSpot for ${email}: `, responseBody.message)
   }
 }
