import Portkey from 'portkey-ai';

const portkey = new Portkey({
  apiKey: "PORTKEY_API_KEY",
  // Attach the saved config that enables semantic caching
  config: "pp-mistral-cache-xx"
});

async function main() {
  const response = await portkey.chat.completions.create({
    model: "mistral-tiny",
    messages: [{ role: 'user', content: "c'est la vie" }]
  });
  console.log(response.choices[0].message.content);
}

main();
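The config ID above points to a Config saved in the Portkey dashboard. As a rough sketch of what that Config contains, the cache settings can also be passed inline as an object; the mode and max_age values below are illustrative, not a definitive schema:

import Portkey from 'portkey-ai';

// Sketch: passing cache settings inline instead of a saved config ID.
// "semantic" caching matches similar prompts, not just identical ones;
// max_age (seconds) is a placeholder value.
const portkey = new Portkey({
  apiKey: "PORTKEY_API_KEY",
  config: {
    cache: { mode: "semantic", max_age: 60 }
  }
});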
3. Collect Feedback
Gather weighted feedback from users and improve your app:
from portkey_ai import Portkey

portkey = Portkey(
    api_key="PORTKEY_API_KEY"
)

def send_feedback():
    portkey.feedback.create(
        trace_id='REQUEST_TRACE_ID',
        value=0  # For thumbs down
    )

send_feedback()
import Portkey from 'portkey-ai';

const portkey = new Portkey({
  apiKey: "PORTKEY_API_KEY"
});

const sendFeedback = async () => {
  await portkey.feedback.create({
    traceID: "REQUEST_TRACE_ID",
    value: 1 // For thumbs up
  });
};

await sendFeedback();
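The value field carries the feedback signal itself. To make the feedback weighted, as mentioned above, the feedback API also accepts a weight field that scales how much a given piece of feedback counts. A minimal sketch; the value and weight shown are illustrative:

// Sketch of weighted feedback: weight (0–1) scales this feedback's
// influence, e.g. to count power users' ratings more heavily.
await portkey.feedback.create({
  traceID: "REQUEST_TRACE_ID",
  value: 5,    // score on a -10 to 10 scale
  weight: 0.8  // illustrative weight
});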
Conclusion
Integrating Portkey with Mistral helps you build resilient LLM apps from the get-go. With features like semantic caching, observability, load balancing, feedback, and fallbacks, you can ensure optimal performance and continuous improvement.
For more on Configs and other gateway features like Load Balancing, refer to the Portkey documentation.
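As a taste of what such a Config can do, here is an illustrative sketch of a load-balancing setup that splits traffic between two Mistral models. The weights, key placeholder, and model choices are assumptions for the example, not a prescribed configuration:

import Portkey from 'portkey-ai';

// Illustrative load-balancing config: routes roughly 75% of requests
// to mistral-small and 25% to mistral-tiny. Keys and weights are placeholders.
const portkey = new Portkey({
  apiKey: "PORTKEY_API_KEY",
  config: {
    strategy: { mode: "loadbalance" },
    targets: [
      {
        provider: "mistral-ai",
        api_key: "MISTRAL_API_KEY",
        weight: 0.75,
        override_params: { model: "mistral-small" }
      },
      {
        provider: "mistral-ai",
        api_key: "MISTRAL_API_KEY",
        weight: 0.25,
        override_params: { model: "mistral-tiny" }
      }
    ]
  }
});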