Skip to content

Instantly share code, notes, and snippets.

View idontcalculate's full-sized avatar
🎯
Focusing

Sirius1389 idontcalculate

🎯
Focusing
View GitHub Profile
use io_uring::{IoUring, opcode, types::Fd};
use std::net::SocketAddr;
use std::os::fd::AsRawFd;
use tokio::net::{TcpListener, TcpStream};
use std::io::Result;
// NOTE(review): this scrape is truncated. `handle_client`'s body stops after
// the buffer declaration (no io_uring read/write submission, no closing
// brace), and `main` below is missing its closing brace — recover the full
// gist before relying on this code.
async fn handle_client(stream: TcpStream, ring: &mut IoUring) -> Result<()> {
// 1 KiB stack buffer, presumably the target of the queued read — TODO confirm
// once the missing body is recovered.
let mut buf = [0; 1024];
// Prepare a read operation
#[tokio::main]
async fn main() -> Result<()> {
// Bind the listener on localhost:8080; both `parse` and `bind` unwrap, so a
// malformed address or occupied port panics at startup.
let addr = "127.0.0.1:8080".parse::<SocketAddr>().unwrap();
let listener = TcpListener::bind(addr).await.unwrap();
// Single shared ring with a 256-entry submission queue.
let mut ring = IoUring::new(256)?;
loop {
let (stream, _) = listener.accept().await?;
// NOTE(review): `handle_client` is awaited inline, so connections are served
// strictly one at a time; a `tokio::spawn` per connection (with a ring per
// task or a shared submission path) would allow concurrency.
handle_client(stream, &mut ring).await?;
}
[package]
name = "io_server"
version = "0.1.0"
edition = "2021"
[dependencies]
tokio = { version = "1", features = ["full"] }
io-uring = "0.5"
[[bin]]
from query import search_books
def main():
query_text = "carl sagan, cosmos"
results = search_books(query_text)
if not results:
print("No results found")
else:
for point in results:
import os
from qdrant_client import QdrantClient
from fastembed import TextEmbedding
from dotenv import load_dotenv
from qdrant_client.http.models import Distance, VectorParams
# Load environment variables from a .env file
load_dotenv()
# Access the environment variables
import os
import csv
from concurrent.futures import ThreadPoolExecutor
from qdrant_client import QdrantClient
from qdrant_client.http.models import PointStruct
from fastembed import TextEmbedding
from dotenv import load_dotenv
# Load environment variables from a .env file
load_dotenv()
from config import COLLECTION_NAME
import os
import csv
from qdrant_client.http.models import VectorParams, PointStruct
from dotenv import load_dotenv
from generate_embedding import get_book_vector
from connect_qdrant import get_qdrant_client
load_dotenv()
qdrant_client = get_qdrant_client()
from fastembed.embedding import TextEmbedding
def get_book_vector(book_data):
    """Embed a book record into a single dense vector.

    Parameters
    ----------
    book_data : mapping with at least the keys ``'title'`` and
        ``'description'``; their values are concatenated into one text.

    Returns
    -------
    The embedding of the combined "title description" text — the first
    (and only) vector yielded by ``TextEmbedding.embed``.
    """
    # Lazily build and cache the model on the function object: constructing
    # TextEmbedding loads the BGE weights, which is far too expensive to
    # repeat on every call. Interface and output are unchanged.
    embedder = getattr(get_book_vector, "_embedder", None)
    if embedder is None:
        embedder = TextEmbedding(model_name="BAAI/bge-base-en")
        get_book_vector._embedder = embedder
    text = f"{book_data['title']} {book_data['description']}"
    # embed() returns a generator (one vector per input text); we passed a
    # single text, so take the first yielded vector.
    return next(iter(embedder.embed(text)))
def get_query_vector(query_text):
embedder = TextEmbedding(model_name="BAAI/bge-base-en")
from qdrant_client import QdrantClient
from qdrant_client.http.models import Distance, VectorParams
from config import QDRANT_API_KEY, QDRANT_URL
def get_qdrant_client():
# Build a Qdrant client from config-module constants (QDRANT_URL /
# QDRANT_API_KEY), which elsewhere in this gist are populated from a .env
# file via load_dotenv().
# NOTE(review): this fragment is truncated — no `return qdrant_client` is
# visible and the "ensure the collection exists" step is cut off, so as
# shown the function would return None. Recover the full source; indentation
# was also stripped by the scrape.
qdrant_client = QdrantClient(
url=QDRANT_URL,
api_key=QDRANT_API_KEY,
)
# Ensure the collection exists
@idontcalculate
idontcalculate / vggtunedgr.py
Created May 17, 2024 20:07
gradio wrapper around predictions
import tensorflow as tf
from tensorflow.keras.models import load_model
import gradio as gr
import numpy as np
from PIL import Image
# Load the saved model
model = load_model("modelVGG16.h5")
# Define the prediction function