Class: ReplicateLLM
Replicate LLM implementation.
Extends
BaseLLM
Constructors
new ReplicateLLM()
new ReplicateLLM(init?): ReplicateLLM
Parameters
• init?: Partial<ReplicateLLM> & object
Returns
Overrides
BaseLLM.constructor
Source
packages/llamaindex/src/llm/replicate_ai.ts:115
Properties
chatStrategy
chatStrategy: ReplicateChatStrategy
Source
packages/llamaindex/src/llm/replicate_ai.ts:109
maxTokens?
optional maxTokens: number
Source
packages/llamaindex/src/llm/replicate_ai.ts:112
model
model: "Llama-2-70b-chat-old" | "Llama-2-70b-chat-4bit" | "Llama-2-13b-chat-old" | "Llama-2-13b-chat-4bit" | "Llama-2-7b-chat-old" | "Llama-2-7b-chat-4bit" | "llama-3-70b-instruct" | "llama-3-8b-instruct"
Source
packages/llamaindex/src/llm/replicate_ai.ts:108
replicateSession
replicateSession: ReplicateSession
Source
packages/llamaindex/src/llm/replicate_ai.ts:113
temperature
temperature: number
Source
packages/llamaindex/src/llm/replicate_ai.ts:110
topP
topP: number
Source
packages/llamaindex/src/llm/replicate_ai.ts:111
Accessors
metadata
get metadata(): object
Returns
object
contextWindow
contextWindow: number
maxTokens
maxTokens: undefined | number
model
model: "Llama-2-70b-chat-old" | "Llama-2-70b-chat-4bit" | "Llama-2-13b-chat-old" | "Llama-2-13b-chat-4bit" | "Llama-2-7b-chat-old" | "Llama-2-7b-chat-4bit" | "llama-3-70b-instruct" | "llama-3-8b-instruct"
temperature
temperature: number
tokenizer
tokenizer: undefined = undefined
topP
topP: number
Source
packages/llamaindex/src/llm/replicate_ai.ts:140
Methods
chat()
chat(params)
chat(params): Promise<AsyncIterable<ChatResponseChunk>>
Parameters
• params: LLMChatParamsStreaming<object, object>