1st commit

Koushik Roy 2025-06-11 10:42:40 +05:30
commit 9613a008be
20 changed files with 9228 additions and 0 deletions

.eslintrc.js Normal file (26 lines)

@@ -0,0 +1,26 @@
module.exports = {
root: true,
parser: '@typescript-eslint/parser',
parserOptions: {
ecmaVersion: 2020,
sourceType: 'module',
project: './tsconfig.json',
},
plugins: ['@typescript-eslint'],
extends: [
'eslint:recommended',
'plugin:@typescript-eslint/recommended',
'plugin:prettier/recommended',
],
env: {
node: true,
jest: true,
},
rules: {
'@typescript-eslint/explicit-function-return-type': 'off',
'@typescript-eslint/explicit-module-boundary-types': 'off',
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/no-unused-vars': ['warn', { argsIgnorePattern: '^_' }],
'prettier/prettier': 'warn',
},
};

.gitignore vendored Normal file (51 lines)

@@ -0,0 +1,51 @@
# Dependencies
node_modules/
.pnp
.pnp.js
# Testing
coverage/
# Production
build/
dist/
# Misc
.DS_Store
*.pem
# Debug logs
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Environment variables
.env
.env.local
.env.development.local
.env.test.local
.env.production.local
# Editor directories and files
.idea
.vscode/*
!.vscode/extensions.json
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
# Logs
logs
*.log
# OS generated files
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

.prettierrc Normal file (10 lines)

@@ -0,0 +1,10 @@
{
"semi": true,
"trailingComma": "es5",
"singleQuote": true,
"printWidth": 100,
"tabWidth": 2,
"useTabs": false,
"bracketSpacing": true,
"arrowParens": "avoid"
}

README.md Normal file (87 lines)

@@ -0,0 +1,87 @@
# Gemini MCP Client
A TypeScript client library for interacting with MCP (Model Context Protocol) servers through Gemini or any OpenAI-compatible chat model.
## Installation
```bash
npm install client-mcp
```
## Features
- TypeScript support out of the box
- Promise-based API
- Comprehensive type definitions
- Easy integration with Node.js applications
## Usage
```typescript
import { ClientMCP } from 'client-mcp';

// Create a new client instance
const client = new ClientMCP({
  apiKey: 'your-api-key',
  model: 'gemini-2.0-flash',
  baseUrl: 'https://generativelanguage.googleapis.com/v1beta/openai',
});

// Connect to an MCP server and stream a chat
async function main() {
  await client.connectToServer(new URL('http://localhost:3003/mcp'));
  for await (const chunk of client.chat('Hello, how are you?')) {
    process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
  }
  await client.disconnect();
}

main().catch(console.error);
```
## API Reference
### `new ClientMCP(config: ClientMCPConfig)`
Creates a new MCP client instance.
#### Parameters
- `config` (Object): Configuration object
  - `apiKey` (string, required): API key for the model provider
  - `model` (string): Model to use (default: `"gemini-2.0-flash"`)
  - `baseUrl` (string): Base URL of the OpenAI-compatible API endpoint
  - `timeout` (number): Request timeout in milliseconds (default: 30000)
  - `debug` (boolean): Enable debug logging (default: false)
  - `systemMessages` (string): System prompt used to seed the conversation
### Methods
#### `connectToServer(serverPath: string | URL, sessionId?: string): Promise<void>`
Connects to an MCP server. A `.js` or `.py` script path uses the stdio transport; a URL uses the SSE transport.
#### `chat(content: string, options?: { maxDepth?: number; autoSummarize?: boolean }): AsyncGenerator`
Streams a chat completion with automatic tool handling. `maxDepth` caps tool-call recursion; `autoSummarize` requests a closing summary.
#### `disconnect(): Promise<void>`
Closes the connection to the MCP server.
#### `getConversationState(): ConversationState`
Returns the current message count, number of available tools, and connection status.
#### `clearConversation(): void`
Clears the conversation history while keeping system messages.
#### `cancelRequests(): void`
Aborts all in-flight requests.
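Between tasks you can inspect and reset the conversation state (a minimal sketch using the methods above):
```typescript
const state = client.getConversationState();
console.log(`messages: ${state.messageCount}, tools: ${state.toolsAvailable}`);
client.clearConversation(); // keeps system messages, drops everything else
```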
## Development
1. Clone the repository
2. Install dependencies:
```bash
npm install
```
3. Build the project:
```bash
npm run build
```
4. Run tests:
```bash
npm test
```
## License
MIT

jest.config.js Normal file (17 lines)

@@ -0,0 +1,17 @@
module.exports = {
preset: 'ts-jest',
testEnvironment: 'node',
testRegex: '(/__tests__/.*|\\.(test|spec))\\.ts$',
moduleFileExtensions: ['ts', 'js', 'json', 'node'],
collectCoverage: true,
coverageDirectory: 'coverage',
coverageReporters: ['text', 'lcov'],
coveragePathIgnorePatterns: [
'/node_modules/',
'/dist/',
'/coverage/',
'/__tests__/'
],
verbose: true,
testTimeout: 10000
};

package-lock.json generated Normal file (7062 lines)

File diff suppressed because it is too large.

package.json Normal file (68 lines)

@@ -0,0 +1,68 @@
{
"name": "client-mcp",
"version": "0.1.0",
"description": "A TypeScript client for interacting with MCP services",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"files": [
"dist"
],
"scripts": {
"build": "tsc",
"clean": "rimraf dist",
"prepare": "npm run build",
"prepublishOnly": "npm run lint && npm run test",
"preversion": "npm run lint",
"version": "npm run format && git add -A src",
"postversion": "git push && git push --tags",
"dev": "ts-node-dev --respawn --transpile-only src/index.ts",
"format": "prettier --write \"src/**/*.ts\" \"src/**/*.js\"",
"lint": "eslint src --ext .ts",
"test": "jest --passWithNoTests",
"test:watch": "jest --watch"
},
"keywords": [
"gemini",
"openai",
"mcp",
"typescript",
"client",
"api"
],
"author": "",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/kroy665/client-mcp.git"
},
"bugs": {
"url": "https://github.com/kroy665/client-mcp/issues"
},
"engines": {
"node": ">=14.0.0"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.53.0",
"@modelcontextprotocol/sdk": "^1.12.1",
"cross-fetch": "^3.1.5",
"openai": "^5.1.1"
},
"devDependencies": {
"@types/jest": "^29.5.14",
"@types/mocha": "^10.0.10",
"@types/node": "^20.11.28",
"@typescript-eslint/eslint-plugin": "^6.0.0",
"@typescript-eslint/parser": "^6.0.0",
"eslint": "^8.0.0",
"eslint-config-prettier": "^9.0.0",
"eslint-plugin-prettier": "^5.0.0",
"jest": "^29.0.0",
"jest-mock-extended": "^4.0.0-beta1",
"prettier": "^3.0.0",
"rimraf": "^5.0.0",
"ts-jest": "^29.3.4",
"ts-node": "^10.9.2",
"ts-node-dev": "^2.0.0",
"typescript": "^5.3.3"
}
}

src/client.ts Normal file (584 lines)

@@ -0,0 +1,584 @@
import { Logger } from './utils/logger';
import {
ClientMCPConfig,
ChatChunk,
ConversationState
} from './types';
import OpenAI from "openai";
import {
Tool as AnthropicTool
} from "@anthropic-ai/sdk/resources/messages/messages.mjs";
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js";
import { convertContext } from "./utils/convertContext";
/**
* Production-ready MCP (Model Context Protocol) client for AI conversations with tool support
*
* @example
* ```typescript
* const client = new ClientMCP({
* apiKey: "your-api-key",
* baseUrl: "https://generativelanguage.googleapis.com/v1beta/openai", // OpenAI-compatible API base
* model: "gemini-2.0-flash",
* debug: true
* });
*
* await client.connectToServer(new URL("http://localhost:3003/mcp"));
*
* for await (const chunk of client.chat("Hello, how are you?")) {
* console.log(chunk.choices[0]?.delta?.content);
* }
* ```
*/
export class ClientMCP {
private static readonly DEFAULT_TIMEOUT = 30000;
private static readonly DEFAULT_MODEL = "gemini-2.0-flash";
private static readonly MAX_RECURSION_DEPTH = 5;
private readonly config: Required<Pick<ClientMCPConfig, 'timeout' | 'debug'>> & {
apiKey: string;
model: string;
baseUrl?: string;
};
private readonly logger: Logger;
private readonly ai: OpenAI;
private readonly mcp: Client;
private transport: StdioClientTransport | SSEClientTransport | null = null;
private tools: AnthropicTool[] = [];
private functionDeclarations: OpenAI.Chat.Completions.ChatCompletionTool[] = [];
private messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [];
private abortController: AbortController;
private isConnected = false;
private recentConversationLength = 0;
constructor(config: ClientMCPConfig) {
this.validateConfig(config);
this.config = this.normalizeConfig(config);
this.logger = Logger.getInstance(this.config.debug);
this.ai = this.initializeOpenAI();
this.mcp = this.initializeMCP();
this.abortController = new AbortController();
this.addMessage(config?.systemMessages || "You are a smart AI assistant. You have access to tools to help you with your tasks, so plan your actions carefully so that you can use those tools to complete your task.\n" +
"Refactor your plan if it is not working.\n" +
"Create a plan at the start of any task.\n" +
"You can use multiple tools to complete your task.\n" +
"You can use tools one after another, or simultaneously.\n",
"system");
}
/**
* Establishes connection to MCP server
*
* @param serverPath - URL or string path to the MCP server
* @param sessionId - Optional session ID for reconnection
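* @example
* ```typescript
* // stdio transport: path to a local .js or .py server script (illustrative path)
* await client.connectToServer("./mcp-server.js");
* // SSE transport: pass a URL, optionally with a session ID for reconnection
* await client.connectToServer(new URL("http://localhost:3003/mcp"), "session-123");
* ```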
*/
public async connectToServer(serverPath: string | URL, sessionId?: string): Promise<void> {
if (this.isConnected) {
this.logger.warn('Already connected to MCP server');
return;
}
try {
this.transport = await this.createTransport(serverPath, sessionId);
await this.mcp.connect(this.transport);
await this.loadTools();
this.isConnected = true;
this.logger.info(`Successfully connected to MCP server: ${serverPath}`);
} catch (error) {
this.logger.error('Failed to connect to MCP server:', error);
throw new Error(`MCP connection failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
}
}
/**
* Disconnects from MCP server
*/
public async disconnect(): Promise<void> {
if (!this.isConnected) return;
try {
if (this.transport) {
await this.transport.close();
this.transport = null;
}
this.isConnected = false;
this.logger.info('Disconnected from MCP server');
} catch (error) {
this.logger.error('Error during disconnect:', error);
}
}
/**
* Performs streaming chat completion
*
* @param options - Options for the chat completion
* @returns Async generator of chat completion chunks
*/
public async *chatCompletionStream(options: {
messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[];
reasoningEffort?: "low" | "medium" | "high";
tools?: OpenAI.Chat.Completions.ChatCompletionTool[];
toolChoice?: OpenAI.Chat.Completions.ChatCompletionCreateParams['tool_choice'];
}): AsyncGenerator<OpenAI.Chat.Completions.ChatCompletionChunk> {
try {
const stream = await this.ai.chat.completions.create({
model: this.config.model,
messages: options.messages,
...(options.reasoningEffort && { reasoning_effort: options.reasoningEffort }),
...(options.tools && { tools: options.tools }),
...(options.toolChoice && { tool_choice: options.toolChoice }),
stream: true,
});
for await (const chunk of stream) {
yield chunk;
}
} catch (error) {
this.logger.error('Streaming chat completion failed:', error);
throw this.handleApiError(error);
}
}
/**
* Main chat interface with automatic tool handling and conversation management
*
* @param content - User input message
* @param options - Chat options: `maxDepth` caps tool-call recursion and
* `autoSummarize` requests a closing summary of the conversation
* @returns Async generator of chat completion chunks and tool calls
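* @example
* ```typescript
* // Stream a reply, allowing up to 3 rounds of tool-call recursion (illustrative)
* for await (const chunk of client.chat("List the project files", { maxDepth: 3 })) {
*   process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
* }
* ```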
*/
public async *chat(
content: string,
options: {
maxDepth?: number;
autoSummarize?: boolean;
} = {}
): AsyncGenerator<OpenAI.Chat.Completions.ChatCompletionChunk | ChatChunk> {
const { maxDepth = ClientMCP.MAX_RECURSION_DEPTH, autoSummarize = false } = options;
this.recentConversationLength = 0;
yield* this.chatInternal(content, 0, maxDepth, autoSummarize);
}
/**
* Cancels all ongoing requests
*/
public cancelRequests(): void {
this.abortController.abort();
this.abortController = new AbortController();
this.logger.info('All requests cancelled');
}
/**
* Gets current conversation state
*
* @returns Current conversation state
*/
public getConversationState(): ConversationState {
return {
messageCount: this.messages.length,
toolsAvailable: this.tools.length,
isConnected: this.isConnected,
lastActivity: new Date()
};
}
/**
* Clears conversation history
*/
public clearConversation(): void {
const systemMessages = this.messages.filter(msg => msg.role === 'system');
this.messages = systemMessages;
this.logger.info('Conversation history cleared');
}
// Private methods
private validateConfig(config: ClientMCPConfig): void {
if (!config.apiKey?.trim()) {
throw new Error('API key is required and cannot be empty');
}
}
private normalizeConfig(config: ClientMCPConfig) {
return {
timeout: config.timeout ?? ClientMCP.DEFAULT_TIMEOUT,
debug: config.debug ?? false,
apiKey: config.apiKey.trim(),
model: config.model ?? ClientMCP.DEFAULT_MODEL,
baseUrl: config.baseUrl
};
}
private initializeOpenAI(): OpenAI {
return new OpenAI({
apiKey: this.config.apiKey,
...(this.config.baseUrl && { baseURL: this.config.baseUrl }),
timeout: this.config.timeout
});
}
private initializeMCP(): Client {
return new Client(
{ name: "mcp-client-cli", version: "1.0.0" },
{ capabilities: { tools: {} } }
);
}
private async createTransport(
serverPath: string | URL,
sessionId?: string
): Promise<StdioClientTransport | SSEClientTransport> {
if (typeof serverPath === 'string') {
return this.createStdioTransport(serverPath);
} else {
return this.createSSETransport(serverPath, sessionId);
}
}
private createStdioTransport(serverPath: string): StdioClientTransport {
const isJavaScript = serverPath.endsWith('.js');
const isPython = serverPath.endsWith('.py');
if (!isJavaScript && !isPython) {
throw new Error('Server script must be a .js or .py file');
}
const command = isPython
? (process.platform === "win32" ? "python" : "python3")
: process.execPath;
return new StdioClientTransport({
command,
args: [serverPath]
});
}
private createSSETransport(serverUrl: URL, sessionId?: string): SSEClientTransport {
return new SSEClientTransport(serverUrl, {
requestInit: {
headers: {
'Accept': 'text/event-stream',
'Content-Type': 'application/json',
...(sessionId && { 'mcp-session-id': sessionId })
}
}
});
}
public async loadLocalFunctionDeclarations(): Promise<void> {
const generate_custom_tool = {
"type": "function",
"function": {
"name": "generate_custom_tool",
"description": "You will create a python function based on the user's request. \n" +
"The function will be used to complete the user's request. \n" +
"The function will be added to the tools list. \n" +
"The function will be used in the next step." +
"This tool is only be used when there is no tool available to do the specified task." +
"If you do not have the functionality for the user's request, you can use this tool to create a python function to do the specified task.\n",
"parameters": {
"type": "object",
"properties": {
"tool_name": {
"type": "string",
"description": "The name of the tool"
},
"tool_description": {
"type": "string",
"description": "The description of the tool"
},
"tool_input_schema": {
"type": "object",
"description": "The input schema of the tool"
},
"tool_python_code": {
"type": "string",
"description": "The python code of the tool"
}
},
"required": ["tool_name", "tool_description", "tool_input_schema", "tool_python_code"]
}
}
} as OpenAI.Chat.Completions.ChatCompletionTool;
this.functionDeclarations.push(generate_custom_tool);
}
private async loadTools(): Promise<void> {
const toolsResult = await this.mcp.listTools();
this.tools = toolsResult.tools.map(tool => ({
name: tool.name,
description: tool.description,
input_schema: tool.inputSchema
}));
this.functionDeclarations = convertContext(this.tools);
// this.loadLocalFunctionDeclarations();
this.logger.info(`Loaded ${this.tools.length} tools:`,
this.tools.map(tool => `${tool.name}: ${tool.description}`).join(', ')
);
}
private async *chatInternal(
content: string,
depth: number,
maxDepth: number,
autoSummarize: boolean
): AsyncGenerator<OpenAI.Chat.Completions.ChatCompletionChunk | ChatChunk> {
if (depth >= maxDepth) {
yield* this.handleMaxDepthReached(content, maxDepth);
return;
}
this.addMessage(content, "user");
let assistantResponse = '';
// Stream the main conversation
for await (const chunk of this.chatCompletionStream({
messages: this.messages,
tools: this.functionDeclarations.length > 0 ? this.functionDeclarations : undefined
})) {
const textContent = chunk.choices[0]?.delta?.content || '';
if (textContent) {
assistantResponse += textContent;
yield chunk;
}
// Handle tool calls
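// Note: this assumes each streamed chunk delivers a complete tool call;
// providers that split function arguments across chunks would need to
// accumulate them before the JSON.parse calls below.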
const toolCalls = chunk.choices[0]?.delta?.tool_calls;
if (toolCalls?.length) {
for (const toolCall of toolCalls) {
yield {
choices: [{
delta: {
content: JSON.stringify({
tool: toolCall.function?.name,
args: JSON.parse(toolCall.function?.arguments || "{}"),
executed: false
}, null, 2),
tool_calls: [{
function: {
name: toolCall.function?.name!,
arguments: toolCall.function?.arguments!
}
}]
}
}]
}
}
const toolResults = await this.executeToolCalls(toolCalls);
for (const result of toolResults) {
autoSummarize = true;
if (result) yield result;
}
}
}
if (assistantResponse.trim()) {
this.addMessage(assistantResponse.trim(), 'assistant');
}
// Check if continuation is needed
const shouldContinue = await this.shouldContinueConversation();
if (shouldContinue.continue && shouldContinue.nextMessage) {
yield* this.chatInternal(shouldContinue.nextMessage, depth + 1, maxDepth, autoSummarize);
} else if (autoSummarize) {
yield* this.generateConversationSummary();
}
}
private async *handleMaxDepthReached(
originalContent: string,
maxDepth: number
): AsyncGenerator<OpenAI.Chat.Completions.ChatCompletionChunk> {
this.logger.warn(`Max depth (${maxDepth}) reached for conversation`);
const contextMessage = `Maximum conversation depth reached. Original request: "${originalContent}". ` +
`Please provide a final response based on the conversation context or indicate if additional input is needed.`;
yield* this.chatCompletionStream({
messages: [...this.messages, { role: "user", content: contextMessage }]
});
}
private async executeToolCalls(
toolCalls: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall[]
): Promise<(ChatChunk | null)[]> {
const toolPromises = toolCalls
.filter(call => call.function?.name && call.function?.arguments)
.map(call => this.executeToolCall(call));
return Promise.all(toolPromises);
}
private async executeToolCall(
toolCall: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall
): Promise<ChatChunk | null> {
const { name, arguments: args } = toolCall.function!;
if(name === "generate_custom_tool"){
const toolArgs = JSON.parse(args || "{}");
const result = await this.generateCustomTool(toolArgs);
return {
choices: [{
delta: {
content: result,
tool_calls: [{ function: { name: name!, arguments: args || "{}" } }]
}
}]
};
}
try {
const toolArgs = JSON.parse(args || "{}");
const result = await this.mcp.callTool({ name: name!, arguments: toolArgs });
const resultContent = this.formatToolResult(name!, toolArgs, result.content);
this.addMessage(resultContent, 'assistant');
return {
choices: [{
delta: {
content: resultContent,
tool_calls: [{ function: { name: name!, arguments: args || "{}" } }]
}
}]
};
} catch (error) {
this.logger.error(`Tool execution failed for ${name}:`, error);
const errorContent = `Tool "${name}" execution failed: ${error instanceof Error ? error.message : 'Unknown error'}`;
return {
choices: [{
delta: {
content: errorContent,
tool_calls: [{ function: { name: name!, arguments: args || "{}" } }]
}
}]
};
}
}
private formatToolResult(name: string, args: Record<string, unknown>, result: unknown): string {
const resJson = {
tool: name,
args,
result,
executed: true
}
return JSON.stringify(resJson, null, 2);
}
private async shouldContinueConversation(): Promise<{ continue: boolean; nextMessage?: string }> {
try {
const tools: OpenAI.Chat.Completions.ChatCompletionTool[] = [{
"type": "function",
"function": {
"name": "evaluate_conversation_status",
"description": "Analyze the current conversation state. \n" +
"Determine if the user's original request has been fully addressed or if additional steps are needed. \n" +
"If more work is required, suggest the next logical step. \n" +
"If there any task/steps remains, suggest the next logical step. \n" +
"Those tasks/steps can be completed by those tools in the next step but first you have to return 'continue': true and 'nextMessage': 'the next message to process if continuation is needed' \n" +
"The tools are: \n" +
this.tools.map((tool, index) => `${index + 1}. ${tool.name}: ${tool.description}`).join('\n'),
"parameters": {
"type": "object",
"properties": {
"continue": {
"type": "boolean",
"description": "Whether the conversation should continue"
},
"nextMessage": {
"type": "string",
"description": "The next message to process if continuation is needed"
}
},
"required": ["continue"]
}
}
}];
const evaluationPrompt = "Have you completed the user's original request? If not, suggest the next logical step.";
let functionArgs = '';
for await (const chunk of this.chatCompletionStream({
messages: [...this.messages, { role: "user", content: evaluationPrompt }],
tools
})) {
const toolCall = chunk.choices[0]?.delta?.tool_calls?.[0];
if (toolCall?.function?.name === "evaluate_conversation_status") {
functionArgs += toolCall.function.arguments || '';
}
}
const result = JSON.parse(functionArgs || '{"continue": false}');
return {
continue: result.continue || false,
nextMessage: result.nextMessage
};
} catch (error) {
this.logger.error('Error evaluating conversation status:', error);
return { continue: false };
}
}
private async *generateConversationSummary(): AsyncGenerator<OpenAI.Chat.Completions.ChatCompletionChunk> {
const summaryPrompt = "Please provide a concise summary of our conversation, " +
"highlighting the key points, decisions made, and any important outcomes. " +
`Do not use the word "summary" in the response. ***Only summarize the last ${this.recentConversationLength + 1} messages.***`;
this.logger.debug("messages::", [...this.messages, { role: "user", content: summaryPrompt }]);
yield* this.chatCompletionStream({
messages: [...this.messages, { role: "user", content: summaryPrompt }]
});
}
private addMessage(content: string, role: "user" | "assistant" | "system"): void {
this.messages.push({ role, content });
this.recentConversationLength = this.recentConversationLength + 1;
}
private handleApiError(error: unknown): Error {
if (error instanceof Error) {
if (error.name === 'AbortError') {
return new Error('Request was cancelled');
}
return error;
}
return new Error('Unknown API error occurred');
}
private async generateCustomTool(args: {
tool_name: string;
tool_description: string;
tool_input_schema: Record<string, unknown>;
tool_python_code: string;
}): Promise<string> {
// const tool = {
// name: args.tool_name,
// description: args.tool_description,
// input_schema: args.tool_input_schema
// };
this.functionDeclarations.push({
"type": "function",
"function": {
"name": args.tool_name,
"description": args.tool_description,
"parameters": args.tool_input_schema
}
});
return args.tool_python_code;
}
}

src/index.ts Normal file (12 lines)

@@ -0,0 +1,12 @@
// Re-export all types
export * from './types';
// Export the main client class
export { ClientMCP } from './client';
// Export utility classes
export { Logger } from './utils/logger';
// Default export for CommonJS/ESM interop
import { ClientMCP } from './client';
export default ClientMCP;

src/types/errors.ts Normal file (85 lines)

@@ -0,0 +1,85 @@
/**
* Base error class for Gemini MCP client errors
*/
export class ClientMCPError extends Error {
constructor(message: string, public code: string, public details?: unknown) {
super(message);
this.name = 'ClientMCPError';
// Maintains proper stack trace for where our error was thrown
if (Error.captureStackTrace) {
Error.captureStackTrace(this, ClientMCPError);
}
}
}
/**
* Error thrown when API request fails
*/
export class APIError extends ClientMCPError {
constructor(
message: string,
public status: number,
code: string = 'API_ERROR',
details?: unknown
) {
super(message, code, details);
this.name = 'APIError';
}
}
/**
* Error thrown when authentication fails
*/
export class AuthenticationError extends ClientMCPError {
constructor(message: string = 'Authentication failed', details?: unknown) {
super(message, 'AUTH_ERROR', details);
this.name = 'AuthenticationError';
}
}
/**
* Error thrown when a resource is not found
*/
export class NotFoundError extends ClientMCPError {
constructor(resource: string, id?: string) {
const message = id
? `${resource} with ID ${id} not found`
: `${resource} not found`;
super(message, 'NOT_FOUND');
this.name = 'NotFoundError';
}
}
/**
* Error thrown when a request is invalid
*/
export class ValidationError extends ClientMCPError {
constructor(message: string = 'Invalid request', details?: unknown) {
super(message, 'VALIDATION_ERROR', details);
this.name = 'ValidationError';
}
}
/**
* Error thrown when a request times out
*/
export class TimeoutError extends ClientMCPError {
constructor(timeout: number) {
super(`Request timed out after ${timeout}ms`, 'TIMEOUT_ERROR');
this.name = 'TimeoutError';
}
}
/**
* Error thrown when the API rate limit is exceeded
*/
export class RateLimitError extends ClientMCPError {
constructor(
message: string = 'Rate limit exceeded',
public retryAfter?: number
) {
super(message, 'RATE_LIMIT_EXCEEDED');
this.name = 'RateLimitError';
}
}
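// Example (illustrative): narrowing a caught error to one of the typed errors above.
// try {
//   await doRequest(); // hypothetical call that may throw a ClientMCPError subclass
// } catch (err) {
//   if (err instanceof RateLimitError) {
//     // wait err.retryAfter ms (if provided) before retrying
//   } else if (err instanceof APIError) {
//     // err.status and err.code identify the failure
//   } else if (err instanceof ClientMCPError) {
//     // all client errors carry .code and optional .details
//   }
// }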

src/types/index.ts Normal file (223 lines)

@@ -0,0 +1,223 @@
/**
* Configuration for the MCP client
*/
export interface ClientMCPConfig {
/** OpenAI API key */
apiKey: string;
/** AI model to use (default: "gemini-2.0-flash") */
model?: string;
/** Custom base URL for API requests */
baseUrl?: string;
/** Request timeout in milliseconds (default: 30000) */
timeout?: number;
/** Enable debug logging (default: false) */
debug?: boolean;
/** System messages to initialize the conversation */
systemMessages?: string;
}
/**
* Generic API response wrapper
*/
export interface ApiResponse<T> {
data: T;
error?: string;
metadata?: Record<string, unknown>;
}
/**
* Custom chat chunk for tool execution results
*/
export interface ChatChunk {
choices: Array<{
delta: {
content?: string;
tool_calls?: Array<{
function: {
name: string;
arguments: string;
};
}>;
};
}>;
}
/**
* Result of tool execution
*/
export interface ToolExecutionResult {
success: boolean;
result?: unknown;
error?: string;
toolName: string;
arguments: Record<string, unknown>;
timestamp: Date;
}
/**
* Current state of the conversation
*/
export interface ConversationState {
messageCount: number;
toolsAvailable: number;
isConnected: boolean;
lastActivity: Date;
}
/**
* Options for chat operations
*/
export interface ChatOptions {
/** Maximum recursion depth for tool calls */
maxDepth?: number;
/** Whether to automatically summarize long conversations */
autoSummarize?: boolean;
/** Reasoning effort level for AI responses */
reasoningEffort?: "low" | "medium" | "high";
}
/**
* Tool definition for MCP
*/
export interface MCPTool {
name: string;
description: string;
input_schema: Record<string, unknown>;
}
/**
* Connection status information
*/
export interface ConnectionStatus {
connected: boolean;
serverType: 'stdio' | 'sse' | null;
connectedAt?: Date;
lastError?: string;
}
/**
* Enhanced error information
*/
export interface MCPError extends Error {
code?: string;
details?: Record<string, unknown>;
retryable?: boolean;
}
/**
* Session information for SSE connections
*/
export interface SessionInfo {
id: string;
createdAt: Date;
lastActivity: Date;
metadata?: Record<string, unknown>;
}
/**
* Conversation continuation decision
*/
export interface ContinuationDecision {
continue: boolean;
nextMessage?: string;
reason?: string;
confidence?: number;
}
/**
* Tool call tracking information
*/
export interface ToolCallTracker {
callId: string;
toolName: string;
startTime: Date;
endTime?: Date;
status: 'pending' | 'success' | 'error';
result?: unknown;
error?: string;
}
/**
* Performance metrics
*/
export interface PerformanceMetrics {
totalRequests: number;
successfulRequests: number;
failedRequests: number;
averageResponseTime: number;
toolCallsExecuted: number;
conversationDepth: number;
lastRequestTime?: Date;
}
/**
* Enhanced logging context
*/
export interface LogContext {
sessionId?: string;
conversationId?: string;
userId?: string;
toolName?: string;
metadata?: Record<string, unknown>;
}
/**
* Configuration validation result
*/
export interface ConfigValidationResult {
valid: boolean;
errors: string[];
warnings: string[];
}
/**
* Connection parameters for different transport types
*/
export type ConnectionParams = {
type: 'stdio';
scriptPath: string;
} | {
type: 'sse';
serverUrl: URL;
sessionId?: string;
headers?: Record<string, string>;
};
/**
* Event types for the MCP client
*/
export type MCPClientEvent =
| { type: 'connected'; data: ConnectionStatus }
| { type: 'disconnected'; data: { reason?: string } }
| { type: 'tool_call_started'; data: ToolCallTracker }
| { type: 'tool_call_completed'; data: ToolCallTracker }
| { type: 'conversation_depth_warning'; data: { currentDepth: number; maxDepth: number } }
| { type: 'error'; data: MCPError };
/**
* Callback for handling MCP client events
*/
export type MCPEventHandler = (event: MCPClientEvent) => void;
/**
* Advanced configuration options
*/
export interface AdvancedClientConfig extends ClientMCPConfig {
/** Maximum number of concurrent tool calls */
maxConcurrentToolCalls?: number;
/** Enable automatic conversation summarization */
autoSummarizeThreshold?: number;
/** Custom headers for HTTP requests */
customHeaders?: Record<string, string>;
/** Retry configuration */
retryConfig?: {
maxRetries: number;
initialDelay: number;
maxDelay: number;
backoffMultiplier: number;
};
/** Performance monitoring */
enableMetrics?: boolean;
/** Event handler for client events */
eventHandler?: MCPEventHandler;
}
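// Example (illustrative; retryConfig and enableMetrics are declared above but
// not yet consumed by ClientMCP):
// const config: AdvancedClientConfig = {
//   apiKey: "your-api-key",
//   model: "gemini-2.0-flash",
//   retryConfig: { maxRetries: 3, initialDelay: 500, maxDelay: 5000, backoffMultiplier: 2 },
//   enableMetrics: true,
// };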

src/utils/convertContext.ts Normal file (57 lines)

@@ -0,0 +1,57 @@
import {
Tool,
} from "@anthropic-ai/sdk/resources/messages/messages.mjs";
import OpenAI from "openai";
export const convertContext = (tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool[] => {
return tools.map((tool) => {
const functionDeclaration: OpenAI.Chat.Completions.ChatCompletionTool = {
type: "function",
function: {
name: tool.name,
description: tool.description,
parameters: {
type: "object",
properties: {}, // populated from input_schema below
required: [], // populated from input_schema below
},
strict: true,
},
};
if (tool.input_schema) {
// for required fields
if (tool.input_schema.required) {
if (functionDeclaration.function.parameters) {
functionDeclaration.function.parameters.required = Array.isArray(tool.input_schema.required)
? tool.input_schema.required
: [];
}
}
}
const properties: {
[key: string]: {
type: "string" | "number" | "integer" | "boolean" | "object" | "array";
description: string;
};
} = {};
if (tool.input_schema?.properties && typeof tool.input_schema.properties === 'object') {
for (const [key, value] of Object.entries(tool.input_schema.properties)) {
properties[key] = {
type: value.type,
description: value.description,
};
}
}
if (functionDeclaration.function.parameters) {
functionDeclaration.function.parameters.properties = properties;
}
return functionDeclaration;
});
}
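// Example (illustrative tool shape): converting a single MCP tool definition.
// const openAiTools = convertContext([{
//   name: "read_file",
//   description: "Read a file from disk",
//   input_schema: {
//     type: "object",
//     properties: { path: { type: "string", description: "File path" } },
//     required: ["path"],
//   },
// }]);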

src/utils/logger.ts Normal file (50 lines)

@@ -0,0 +1,50 @@
export class Logger {
private static instance: Logger;
private debugMode: boolean;
private prefix: string;
private constructor(debug: boolean = false, prefix: string = 'ClientMCP') {
this.debugMode = debug;
this.prefix = `[${prefix}]:`;
}
/**
* Get the singleton logger instance. Note: `debug` and `prefix` take effect only on the first call.
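* @example
* const log = Logger.getInstance(true); // debug enabled; later calls reuse this instance
* log.debug("verbose details"); // printed only when debugMode is true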
*/
public static getInstance(debug: boolean = false, prefix?: string): Logger {
if (!Logger.instance) {
Logger.instance = new Logger(debug, prefix);
}
return Logger.instance;
}
/**
* Log debug message
*/
public debug(...args: unknown[]): void {
if (this.debugMode) {
console.debug(this.prefix, "[debug]:", ...args);
}
}
/**
* Log info message
*/
public info(...args: unknown[]): void {
console.info(this.prefix, "[info]:", ...args);
}
/**
* Log warning message
*/
public warn(...args: unknown[]): void {
console.warn(this.prefix, "[warn]:", ...args);
}
/**
* Log error message
*/
public error(...args: unknown[]): void {
console.error(this.prefix, "[error]:", ...args);
}
}

src_old/client.ts Normal file (594 lines)

@@ -0,0 +1,594 @@
import { Logger } from './utils/logger';
import {
ClientMCPConfig,
ApiResponse
} from './types';
import OpenAI from "openai";
import {
Tool as AnthropicTool
} from "@anthropic-ai/sdk/resources/messages/messages.mjs";
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
import { convertContext } from "./utils/convertContext";
import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js";
import { ChatChunk } from "./types";
/**
* Main client class for interacting with the Gemini MCP API
*/
export class ClientMCP {
private readonly config: Required<Pick<ClientMCPConfig, 'timeout' | 'debug'>> & {
apiKey: string,
model: string
};
private readonly logger: Logger;
private abortController: AbortController;
private readonly ai: OpenAI;
private mcp: Client;
private transport: StdioClientTransport | SSEClientTransport | null = null;
private tools: AnthropicTool[] = [];
public encoder = new TextEncoder();
public functionDeclarations: OpenAI.Chat.Completions.ChatCompletionTool[] = [];
public messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [];
public knowledgeBase: string[] = [];
/**
* Create a new MCP client instance
* @param config Configuration options
*/
constructor(config: ClientMCPConfig) {
if (!config.apiKey) {
throw new Error('API key is required');
}
this.config = {
timeout: config.timeout || 30000,
debug: config.debug || false,
apiKey: config.apiKey,
model: config.model || "gemini-2.0-flash",
};
const openai = new OpenAI({
apiKey: config.apiKey,
baseURL: config.baseUrl
});
this.ai = openai;
this.mcp = new Client({ name: "mcp-client-cli", version: "1.0.0" }, { capabilities: { tools: {} } });
this.encoder = new TextEncoder();
this.addToMessages(config.systemMessages || "You are a helpful assistant.", "system");
this.logger = Logger.getInstance(this.config.debug);
this.abortController = new AbortController();
}
/**
* chatCompletion
*/
public async chatCompletion({
messages,
reasoning_effort,
tools,
tool_choice
}: {
messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[]
reasoning_effort?: "low" | "medium" | "high"
tools?: OpenAI.Chat.Completions.ChatCompletionCreateParams['tools'],
tool_choice?: OpenAI.Chat.Completions.ChatCompletionCreateParams['tool_choice']
}): Promise<ApiResponse<OpenAI.Chat.Completions.ChatCompletion & {
_request_id?: string | null;
}>> {
try {
const response = await this.ai.chat.completions.create({
model: this.config.model,
messages,
reasoning_effort: reasoning_effort as OpenAI.Chat.Completions.ChatCompletionCreateParams['reasoning_effort'],
tools,
tool_choice
})
return {
data: response
}
} catch (error) {
this.logger.error('Error in chatCompletion:', error);
throw error;
}
}
/**
* chatCompletionStream by yield
*/
public async *chatCompletionStream({
messages,
reasoning_effort,
tools,
tool_choice
}: {
messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[]
reasoning_effort?: "low" | "medium" | "high"
tools?: OpenAI.Chat.Completions.ChatCompletionCreateParams['tools'],
tool_choice?: OpenAI.Chat.Completions.ChatCompletionCreateParams['tool_choice']
}): AsyncGenerator<OpenAI.Chat.Completions.ChatCompletionChunk> {
try {
const stream = await this.ai.chat.completions.create({
model: this.config.model,
messages,
reasoning_effort: reasoning_effort as OpenAI.Chat.Completions.ChatCompletionCreateParams['reasoning_effort'],
tools,
tool_choice,
stream: true
})
for await (const chunk of stream) {
yield chunk
}
} catch (error) {
this.logger.error('Error in chatCompletionStream:', error);
throw error;
}
}
/**
* Connect to mcp server
*/
public async connectToServer(serverScriptPathOrUrl: string | URL, session_id?: string): Promise<void> {
try {
if (typeof serverScriptPathOrUrl === 'string') {
const isJs = serverScriptPathOrUrl.endsWith(".js");
const isPy = serverScriptPathOrUrl.endsWith(".py");
if (!isJs && !isPy) {
throw new Error("Server script must be a .js or .py file");
}
const command = isPy
? process.platform === "win32"
? "python"
: "python3"
: process.execPath;
this.transport = new StdioClientTransport({
command,
args: [serverScriptPathOrUrl],
});
await this.mcp.connect(this.transport);
} else {
// Create a new SSE transport
try {
const newTransport = new SSEClientTransport(
// new URL("/mcp", "http://localhost:3003"),
serverScriptPathOrUrl,
{
requestInit: {
headers: {
Accept: "text/event-stream",
'mcp-session-id': session_id || ""
}
}
}
);
// console.log("Trying SSE MCP...", newTransport)
this.transport = newTransport
await this.mcp.connect(newTransport);
} catch (error) {
this.logger.error("Error connecting to MCP server:", error);
throw error;
}
}
const toolsResult = await this.mcp.listTools();
this.tools = toolsResult.tools.map((tool) => {
this.logger.info("Tool:", JSON.stringify(tool, null, 2));
return {
name: tool.name,
description: tool.description,
input_schema: tool.inputSchema,
};
});
// log the name, description of the tools
this.logger.info("Tools:", this.tools.map((tool) => `${tool.name}: ${tool.description}`).join("\n"));
try {
const functionDeclarations = convertContext(this.tools);
this.functionDeclarations = functionDeclarations;
} catch (error) {
this.logger.error("Error converting tools to function declarations:", error);
}
} catch (e) {
this.logger.error("Failed to connect to MCP server: ", e);
throw e;
}
}
public async *chat(
content: string,
depth: number = 0,
maxDepth: number = 5
): AsyncGenerator<OpenAI.Chat.Completions.ChatCompletionChunk | ChatChunk> {
// Prevent infinite recursion
if (depth >= maxDepth) {
this.logger.error(`Maximum chat depth (${maxDepth}) reached. Stopping recursion.`);
let fullContent = '';
for await (const chunk of this.chatCompletionStream({
messages: this.messages.concat([
{
role: "user",
content: `You have reached the maximum chat depth (${maxDepth}). The main goal was: "${content}". \n` +
`Using all the available tools and the conversation context gathered so far, try to complete the main goal. \n` +
`If this context is enough to complete the main goal, just return the result; otherwise, ask for help.`
}
]),
})) {
const content = chunk.choices[0]?.delta?.content || '';
if (content) {
fullContent += content;
yield chunk;
continue;
}
}
this.addToMessages(fullContent, "assistant");
return;
}
this.addToMessages(content, "user");
let fullContent = '';
for await (const chunk of this.chatCompletionStream({
messages: this.messages,
tools: this.functionDeclarations,
})) {
const content = chunk.choices[0]?.delta?.content || '';
if (content) {
fullContent += content;
yield chunk;
continue;
}
// Handle tool calls
const toolCalls = chunk.choices[0]?.delta?.tool_calls;
if (toolCalls?.length) {
// Process tool calls in parallel
const toolPromises = toolCalls.map(toolCall =>
this.handleToolCall(toolCall)
);
// Wait for all tool calls to complete
const toolResults = await Promise.all(toolPromises);
// Process and yield tool results
for (const result of toolResults) {
if (result) {
yield result;
// Add tool result to messages
this.addToMessages(result.choices[0].delta.content!, 'assistant');
// Get explanation for the tool result
// const explanation = await this.getToolExplanation(result.choices[0].delta.content!);
// if (explanation) {
// yield explanation;
// this.addToMessages(explanation.choices[0].delta.content!, 'assistant');
// }
}
}
}
}
// Add the complete assistant's response to messages
const trimmedContent = fullContent.trim();
if (trimmedContent) {
this.addToMessages(trimmedContent, 'assistant');
}
const isExpected = await this.isThisResultExpected();
this.logger.info("isExpected", JSON.stringify(isExpected, null, 2));
if (isExpected.nextUserMessage) {
// send it to *chat
for await (const nextChunk of this.chat(isExpected.nextUserMessage, depth + 1, maxDepth)) {
yield nextChunk;
}
} else {
// is there any need for the Summarize the conversation?
const isSummaryNeeded = await this.isSummaryNeeded();
if (isSummaryNeeded) {
// summarize the conversation
for await (const chunk of this.chatCompletionStream({
messages: this.messages.concat([
{
role: "user",
content: "Summarize the conversation"
}
]),
})) {
yield chunk;
}
}
}
}
private async handleToolCall(toolCall: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall) {
if (!toolCall.function?.name || !toolCall.function?.arguments) {
return null;
}
let toolsArguments: Record<string, unknown>;
try {
toolsArguments = JSON.parse(toolCall.function.arguments || "{}");
} catch (error) {
this.logger.error("Error parsing tool arguments:", error);
return null;
}
try {
const toolResult = await this.mcp.callTool({
name: toolCall.function.name,
arguments: toolsArguments
});
const toolResultString = `ToolCall: ${toolCall.function.name}\n` +
`Arguments: ${JSON.stringify(toolsArguments, null, 2)}\n` +
`Result: ${JSON.stringify(toolResult.content, null, 2)}`;
return {
choices: [{
delta: {
content: toolResultString,
tool_calls: [{
function: {
name: toolCall.function.name,
arguments: toolCall.function.arguments
}
}]
}
}]
};
} catch (error) {
this.logger.error(`Error executing tool ${toolCall.function.name}:`, error);
return {
choices: [{
delta: {
content: `Error executing tool ${toolCall.function.name}: ${error}`,
tool_calls: [{
function: {
name: toolCall.function.name,
arguments: toolCall.function.arguments
}
}]
}
}]
};
}
}
public async getToolExplanation(toolResult: string) {
try {
let explanation = '';
for await (const chunk of this.chatCompletionStream({
messages: [{
role: "user",
content: `Explain this tool result: ${toolResult}`
}],
})) {
const content = chunk.choices[0]?.delta?.content || '';
if (content) {
explanation += content;
}
}
return {
choices: [{
delta: {
content: `\nExplanation: ${explanation}\n`
}
}]
};
} catch (error) {
this.logger.error("Error getting tool explanation:", error);
return null;
}
}
public async isThisResultExpected(): Promise<{
expected: boolean;
nextUserMessage?: string;
}> {
let expected = false;
let nextUserMessage: string | undefined;
try {
const tools = [{
"type": "function",
"function": {
"name": "is_this_result_expected",
"description": "Check if the result is expected, or there is more to be done",
"parameters": {
"type": "object",
"properties": {
"expected": {
"type": "boolean",
"description": "Is the result expected?",
},
},
"required": ["expected"],
},
}
}] as OpenAI.Chat.Completions.ChatCompletionCreateParams['tools'];
let functionArguments = '';
// First check if the result is expected
for await (const chunk of this.chatCompletionStream({
messages: this.messages.concat([
{
role: "user",
content: `This is an internal call. Respond by calling the is_this_result_expected function. ` +
`Is this result expected, or is there more to be done by the user in this conversation?`
}
]),
tools,
})) {
const toolCall = chunk.choices[0]?.delta?.tool_calls?.[0];
if (toolCall?.function?.name === "is_this_result_expected" && toolCall.function.arguments) {
functionArguments += toolCall.function.arguments;
}
}
// Parse the function arguments
try {
this.logger.info("functionArguments", functionArguments);
const args = JSON.parse(functionArguments || "{}");
if (args.expected === true) {
expected = true;
}
} catch (error) {
this.logger.error("Error parsing function arguments:", error);
expected = false;
}
if (expected === false) {
// If not expected, get the next user message
const nextMessageTools = [{
"type": "function",
"function": {
"name": "get_user_next_message",
"description": "Get the user's next message",
"parameters": {
"type": "object",
"properties": {
"message": {
"type": "string",
"description": "The user's next message",
},
},
"required": ["message"],
},
},
}] as OpenAI.Chat.Completions.ChatCompletionCreateParams['tools'];
let nextMessageArgs = '';
for await (const chunk of this.chatCompletionStream({
messages: this.messages.concat([
{
role: "user",
content: `This is an internal call. Respond by calling the get_user_next_message function. ` +
`What would be the user's next message to get the expected result or to continue the work?`
}
]),
tools: nextMessageTools,
})) {
const toolCall = chunk.choices[0]?.delta?.tool_calls?.[0];
if (toolCall?.function?.name === "get_user_next_message" && toolCall.function.arguments) {
nextMessageArgs += toolCall.function.arguments;
}
}
// Parse the next message arguments
try {
const args = JSON.parse(nextMessageArgs || "{}");
nextUserMessage = args.message;
} catch (error) {
this.logger.error("Error parsing next message arguments:", error);
nextUserMessage = undefined;
}
}
return {
expected,
nextUserMessage
};
} catch (error) {
this.logger.error("Error in isThisResultExpected:", error);
return { expected: false, nextUserMessage: undefined };
}
}
private async isSummaryNeeded(): Promise<boolean> {
try {
const summaryNeededTools = [{
"type": "function",
"function": {
"name": "is_summary_needed",
"description": "Check if the summary is needed",
"parameters": {
"type": "object",
"properties": {
"summary_needed": {
"type": "boolean",
"description": "Is the summary needed?",
},
},
"required": ["summary_needed"],
},
},
}] as OpenAI.Chat.Completions.ChatCompletionCreateParams['tools'];
let summaryNeeded = false;
for await (const chunk of this.chatCompletionStream({
messages: this.messages.concat([
{
role: "user",
content: `This is an internal call. Respond by calling the is_summary_needed function. ` +
`Is a summary of the conversation needed?`
}
]),
tools: summaryNeededTools,
})) {
const toolCall = chunk.choices[0]?.delta?.tool_calls?.[0];
if (toolCall?.function?.name === "is_summary_needed" && toolCall.function.arguments) {
try {
const args = JSON.parse(toolCall.function.arguments || "{}");
summaryNeeded = args.summary_needed;
} catch (error) {
this.logger.error("Error parsing summary needed arguments:", error);
summaryNeeded = false;
}
}
}
return summaryNeeded;
} catch (error) {
this.logger.error("Error in isSummaryNeeded:", error);
return false;
}
}
private addToMessages(content: string, role: "user" | "assistant" | "system") {
this.messages.push({
role,
content,
});
}
/**
* Cancel any ongoing requests
*/
public cancelRequests(): void {
this.abortController.abort();
this.abortController = new AbortController();
}
}

src_old/index.ts Normal file (12 lines)

@@ -0,0 +1,12 @@
// Re-export all types
export * from './types';
// Export the main client class
export { ClientMCP } from './client';
// Export utility classes
export { Logger } from './utils/logger';
// Default export for CommonJS/ESM interop
import { ClientMCP } from './client';
export default ClientMCP;

src_old/types/errors.ts Normal file (85 lines)

@@ -0,0 +1,85 @@
/**
* Base error class for Gemini MCP client errors
*/
export class ClientMCPError extends Error {
constructor(message: string, public code: string, public details?: unknown) {
super(message);
this.name = 'ClientMCPError';
// Maintains proper stack trace for where our error was thrown
if (Error.captureStackTrace) {
Error.captureStackTrace(this, ClientMCPError);
}
}
}
/**
* Error thrown when API request fails
*/
export class APIError extends ClientMCPError {
constructor(
message: string,
public status: number,
code: string = 'API_ERROR',
details?: unknown
) {
super(message, code, details);
this.name = 'APIError';
}
}
/**
* Error thrown when authentication fails
*/
export class AuthenticationError extends ClientMCPError {
constructor(message: string = 'Authentication failed', details?: unknown) {
super(message, 'AUTH_ERROR', details);
this.name = 'AuthenticationError';
}
}
/**
* Error thrown when a resource is not found
*/
export class NotFoundError extends ClientMCPError {
constructor(resource: string, id?: string) {
const message = id
? `${resource} with ID ${id} not found`
: `${resource} not found`;
super(message, 'NOT_FOUND');
this.name = 'NotFoundError';
}
}
/**
* Error thrown when a request is invalid
*/
export class ValidationError extends ClientMCPError {
constructor(message: string = 'Invalid request', details?: unknown) {
super(message, 'VALIDATION_ERROR', details);
this.name = 'ValidationError';
}
}
/**
* Error thrown when a request times out
*/
export class TimeoutError extends ClientMCPError {
constructor(timeout: number) {
super(`Request timed out after ${timeout}ms`, 'TIMEOUT_ERROR');
this.name = 'TimeoutError';
}
}
/**
* Error thrown when the API rate limit is exceeded
*/
export class RateLimitError extends ClientMCPError {
constructor(
message: string = 'Rate limit exceeded',
public retryAfter?: number
) {
super(message, 'RATE_LIMIT_EXCEEDED');
this.name = 'RateLimitError';
}
}

src_old/types/index.ts Normal file (67 lines)

@@ -0,0 +1,67 @@
/**
* Configuration options for the Gemini MCP client
*/
export interface ClientMCPConfig {
/**
* Your Gemini MCP API key
*/
apiKey: string;
/**
* Model name, e.g. "gemini-2.0-flash"
*/
model: string;
/**
* Base URL for the Gemini MCP API (default: "https://generativelanguage.googleapis.com/v1beta/openai")
*/
baseUrl: string;
/**
* Request timeout in milliseconds (default: 30000)
*/
timeout?: number;
/**
* Enable debug logging (default: false)
*/
debug?: boolean;
/**
* system messages
*/
systemMessages?: string;
}
/**
* Standard API response format
*/
export interface ApiResponse<T> {
data?: T;
error?: {
code: string;
message: string;
details?: unknown;
};
}
export type ChatChunk = {
choices: Array<{
delta: {
content?: string;
tool_calls?: Array<{
function: {
name?: string;
arguments?: string;
};
}>;
};
}>;
};
// Export error types
export * from './errors';

src_old/utils/convertContext.ts Normal file (57 lines)

@@ -0,0 +1,57 @@
import {
Tool,
} from "@anthropic-ai/sdk/resources/messages/messages.mjs";
import OpenAI from "openai";
export const convertContext = (tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool[] => {
return tools.map((tool) => {
const functionDeclaration: OpenAI.Chat.Completions.ChatCompletionTool = {
type: "function",
function: {
name: tool.name,
description: tool.description,
parameters: {
type: "object",
properties: {}, // populated from input_schema below
required: [], // populated from input_schema below
},
strict: true,
},
};
if (tool.input_schema) {
// for required fields
if (tool.input_schema.required) {
if (functionDeclaration.function.parameters) {
functionDeclaration.function.parameters.required = Array.isArray(tool.input_schema.required)
? tool.input_schema.required
: [];
}
}
}
const properties: {
[key: string]: {
type: "string" | "number" | "integer" | "boolean" | "object" | "array";
description: string;
};
} = {};
if (tool.input_schema?.properties && typeof tool.input_schema.properties === 'object') {
for (const [key, value] of Object.entries(tool.input_schema.properties)) {
properties[key] = {
type: value.type,
description: value.description,
};
}
}
if (functionDeclaration.function.parameters) {
functionDeclaration.function.parameters.properties = properties;
}
return functionDeclaration;
});
}

src_old/utils/logger.ts Normal file (50 lines)

@@ -0,0 +1,50 @@
export class Logger {
private static instance: Logger;
private debugMode: boolean;
private prefix: string;
private constructor(debug: boolean = false, prefix: string = 'ClientMCP') {
this.debugMode = debug;
this.prefix = `[${prefix}]:`;
}
/**
* Get the singleton logger instance. Note: `debug` and `prefix` take effect only on the first call.
*/
public static getInstance(debug: boolean = false, prefix?: string): Logger {
if (!Logger.instance) {
Logger.instance = new Logger(debug, prefix);
}
return Logger.instance;
}
/**
* Log debug message
*/
public debug(...args: unknown[]): void {
if (this.debugMode) {
console.debug(this.prefix, "[debug]:", ...args);
}
}
/**
* Log info message
*/
public info(...args: unknown[]): void {
console.info(this.prefix, "[info]:", ...args);
}
/**
* Log warning message
*/
public warn(...args: unknown[]): void {
console.warn(this.prefix, "[warn]:", ...args);
}
/**
* Log error message
*/
public error(...args: unknown[]): void {
console.error(this.prefix, "[error]:", ...args);
}
}

tsconfig.json Normal file (31 lines)

@@ -0,0 +1,31 @@
{
"compilerOptions": {
/* Base Options */
"target": "es2018",
"module": "commonjs",
"lib": ["es2018"],
"declaration": true,
"declarationMap": true,
"sourceMap": true,
"outDir": "./dist",
"rootDir": "./src",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"moduleResolution": "node",
"resolveJsonModule": true,
"isolatedModules": true,
"noImplicitAny": true,
"strictNullChecks": true,
"strictFunctionTypes": true,
"strictBindCallApply": true,
"noImplicitThis": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true
},
"include": ["src/**/*"],
"exclude": ["node_modules", "**/*.test.ts"]
}