Mahesh Rajput (mrmaheshrajput)

from datetime import datetime
import json
from typing import Any, Dict, List
import boto3
from botocore.exceptions import ClientError
# Initialize a Boto3 session and create a Bedrock runtime client
session = boto3.Session()
region = "us-east-1"  # us-west-2 has better runtime quota
bedrock_runtime = session.client("bedrock-runtime", region_name=region)
import io
import sagemaker
import boto3
import json
# Your IAM role that provides access to SageMaker and S3.
# See https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-ex-role.html
# if running on a SageMaker notebook, or use sagemaker.get_execution_role()
# directly if running on SageMaker Studio.
# Change this to your role
iam_role = "arn:aws:iam::1111111111:role/service-role/AmazonSageMaker-ExecutionRole-00000000T000000"
sagemaker_session = sagemaker.session.Session()
region = sagemaker_session.boto_region_name
smr_client = boto3.client("sagemaker-runtime")
from sagemaker.huggingface.model import HuggingFaceModel
ENDPOINT_NAME = "sbert-embeddings-minilml6" # Change this as desired
role = "" # SageMaker execution role ARN
hub = {
    "HF_MODEL_ID": "sentence-transformers/all-MiniLM-L6-v2",  # Change to your model
    "HF_TASK": "feature-extraction",  # Assumed task for sentence embeddings
}
echo("Creating layer/python directory")
mkdir -p layer/python
echo("Installing cpu only pytorch")
pip install \
--target layer/python torch torchvision torchaudio \
--extra-index-url https://download.pytorch.org/whl/cpu
echo("Installing sentence transformer dependencies")
pip install --target layer/python \
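
Once the layer directory is built, it is typically zipped and published as a Lambda layer. A sketch under stated assumptions: the zip step (`cd layer && zip -r ../layer.zip python`) has already run, and the bucket, key, layer name, and runtime are placeholders.

import boto3

# Large layers (like CPU PyTorch) must be uploaded to S3 first;
# the bucket, key, and layer name below are placeholders.
s3 = boto3.client("s3")
s3.upload_file("layer.zip", "your-bucket", "layers/layer.zip")

lambda_client = boto3.client("lambda")
layer = lambda_client.publish_layer_version(
    LayerName="sentence-transformers-cpu",
    Description="CPU-only PyTorch + sentence-transformers",
    Content={"S3Bucket": "your-bucket", "S3Key": "layers/layer.zip"},
    CompatibleRuntimes=["python3.10"],
)
print(layer["LayerVersionArn"])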
!pip install sagemaker -q
from sagemaker.jumpstart.model import JumpStartModel
model_id, model_version = (
    "huggingface-llm-falcon-7b-instruct-bf16",
    "*",
)
my_model = JumpStartModel(model_id=model_id, model_version=model_version)
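
A brief sketch of deployment and a test call; deploy() picks the default instance type from the JumpStart model spec, and the payload keys are assumptions based on the HuggingFace LLM container.

# Deploy and smoke-test the endpoint (payload shape is an assumption)
predictor = my_model.deploy()
response = predictor.predict({
    "inputs": "What is the capital of France?",
    "parameters": {"max_new_tokens": 64},
})
print(response)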
!pip install -U sagemaker -q
import boto3
import sagemaker
import sagemaker.session
session = sagemaker.session.Session()
region = session.boto_region_name
role = sagemaker.get_execution_role()
bucket = session.default_bucket()
delimiter = "####"
eval_system_prompt = f"""You are an assistant that evaluates \
whether or not an assistant is producing valid quizzes.
The assistant should be producing output in the \
format of Question N:{delimiter} <question N>?"""
llm_response = """
Question 1:#### What is the largest telescope in space called and what material is its mirror made of?
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema.output_parser import StrOutputParser
# prompt_template is assumed here to wire the evaluation instructions to the
# response under test; the exact template comes from the original gist.
prompt_template = eval_system_prompt + "\n\nResponse to evaluate:\n{llm_response}"
chat_prompt = ChatPromptTemplate.from_messages([("human", prompt_template)])
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
output_parser = StrOutputParser()
chain = chat_prompt | llm | output_parser
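
And a hypothetical invocation, assuming the template variable name defined above:

# Run the evaluation chain on the sample quiz output
verdict = chain.invoke({"llm_response": llm_response})
print(verdict)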
import evaluate
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from datasets import load_dataset
# Load the fine-tuned model and tokenizer
model_name = "your-fine-tuned-model-name"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
# Load the test dataset (the dataset name is a placeholder, like the model name above)
test_dataset = load_dataset("your-dataset-name", split="test")
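
From here, a typical next step is scoring generations with the evaluate library. A minimal sketch, assuming a seq2seq summarization setup with hypothetical "text" and "summary" columns:

# Score a small slice of the test set with ROUGE
rouge = evaluate.load("rouge")

sample = test_dataset.select(range(8))  # small slice for a quick check
inputs = tokenizer(sample["text"], return_tensors="pt", padding=True, truncation=True)
outputs = model.generate(**inputs, max_new_tokens=64)
predictions = tokenizer.batch_decode(outputs, skip_special_tokens=True)

scores = rouge.compute(predictions=predictions, references=sample["summary"])
print(scores)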