Example input:

{
  "max_new_tokens": 1024,
  "message": "Write a python function that reads an html file from the internet and extracts the text content of all the h1 elements",
  "system_prompt": "Provide answers in Python",
  "temperature": 0.8,
  "top_k": 50,
  "top_p": 0.95
}

Install Replicate's Node.js client library:

npm install replicate
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=r8_6J3**********************************
This is your API token. Keep it to yourself.
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run nateraw/codellama-13b-instruct using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "nateraw/codellama-13b-instruct:4d4dfb567b910309c9501d56807864fc069ffcd2867552aea073c4b374eef309",
  {
    input: {
      max_new_tokens: 1024,
      message: "Write a python function that reads an html file from the internet and extracts the text content of all the h1 elements",
      system_prompt: "Provide answers in Python",
      temperature: 0.8,
      top_k: 50,
      top_p: 0.95
    }
  }
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:

pip install replicate

Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=r8_6J3**********************************
This is your API token. Keep it to yourself.
import replicate
Run nateraw/codellama-13b-instruct using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "nateraw/codellama-13b-instruct:4d4dfb567b910309c9501d56807864fc069ffcd2867552aea073c4b374eef309",
    input={
        "max_new_tokens": 1024,
        "message": "Write a python function that reads an html file from the internet and extracts the text content of all the h1 elements",
        "system_prompt": "Provide answers in Python",
        "temperature": 0.8,
        "top_k": 50,
        "top_p": 0.95
    }
)
# The nateraw/codellama-13b-instruct model can stream output as it's running.
# The predict method returns an iterator, and you can iterate over that output.
for item in output:
    # https://replicate.com/nateraw/codellama-13b-instruct/api#output-schema
    print(item, end="")
To learn more, take a look at the guide on getting started with Python.
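Since `replicate.run` yields string fragments for this model, you may want the complete reply as a single string instead of printing each fragment as it arrives. Here is a minimal sketch (not part of the Replicate docs) of collecting the streamed output:

```
import replicate

output = replicate.run(
    "nateraw/codellama-13b-instruct:4d4dfb567b910309c9501d56807864fc069ffcd2867552aea073c4b374eef309",
    input={
        "max_new_tokens": 1024,
        "message": "Write a python function that reads an html file from the internet and extracts the text content of all the h1 elements",
        "system_prompt": "Provide answers in Python",
        "temperature": 0.8,
        "top_k": 50,
        "top_p": 0.95
    }
)

# Join the streamed fragments into the complete generated text.
full_text = "".join(output)
print(full_text)
```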
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=r8_6J3**********************************
This is your API token. Keep it to yourself.
Run nateraw/codellama-13b-instruct using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "nateraw/codellama-13b-instruct:4d4dfb567b910309c9501d56807864fc069ffcd2867552aea073c4b374eef309",
    "input": {
      "max_new_tokens": 1024,
      "message": "Write a python function that reads an html file from the internet and extracts the text content of all the h1 elements",
      "system_prompt": "Provide answers in Python",
      "temperature": 0.8,
      "top_k": 50,
      "top_p": 0.95
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
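If you prefer to call the HTTP API directly from Python rather than through curl or the client library, the same request can be made with the `requests` library. This is a minimal sketch based on the curl command above; it assumes `REPLICATE_API_TOKEN` is set in the environment, and the `Prefer: wait` header asks the API to hold the connection and return the finished prediction when it completes quickly enough.

```
import os
import requests

resp = requests.post(
    "https://api.replicate.com/v1/predictions",
    headers={
        # The token is read from the environment, as in the export command above.
        "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
        "Content-Type": "application/json",
        "Prefer": "wait",
    },
    json={
        "version": "nateraw/codellama-13b-instruct:4d4dfb567b910309c9501d56807864fc069ffcd2867552aea073c4b374eef309",
        "input": {
            "max_new_tokens": 1024,
            "message": "Write a python function that reads an html file from the internet and extracts the text content of all the h1 elements",
            "system_prompt": "Provide answers in Python",
            "temperature": 0.8,
            "top_k": 50,
            "top_p": 0.95
        }
    },
)
prediction = resp.json()
print(prediction["status"])
```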
Example output:

Here is an example of a Python function that reads an HTML file from the internet and extracts the text content of all the h1 elements:
```
import requests
from bs4 import BeautifulSoup
def get_h1_text(url):
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')
    h1_elements = soup.find_all('h1')
    h1_text = []
    for h1 in h1_elements:
        h1_text.append(h1.text.strip())
    return h1_text
```
This function uses the `requests` library to make an HTTP GET request to the specified URL, and the `BeautifulSoup` library to parse the HTML response. The function then uses the `find_all()` method of the `BeautifulSoup` object to find all the `h1` elements in the HTML document, and appends the text content of each `h1` element to a list called `h1_text`. Finally, the function returns the `h1_text` list.
Here is an example of how you could use this function:
```
url = 'https://www.example.com'
h1_text = get_h1_text(url)
print(h1_text)
```
This would print a list of all the text content of the `h1` elements in the HTML document at the specified URL.

The corresponding prediction object, as returned by the API:

{
"id": "s73gc7tbtjlvzljf4z5ld4kjoi",
"model": "nateraw/codellama-13b-instruct",
"version": "4d4dfb567b910309c9501d56807864fc069ffcd2867552aea073c4b374eef309",
"input": {
"max_new_tokens": 1024,
"message": "Write a python function that reads an html file from the internet and extracts the text content of all the h1 elements",
"system_prompt": "Provide answers in Python",
"temperature": 0.8,
"top_k": 50,
"top_p": 0.95
},
"logs": "Setting `pad_token_id` to `eos_token_id`:2 for open-end generation.",
"output": [
"",
" ",
"Here ",
"is ",
"an ",
"example ",
"of ",
"a ",
"Python ",
"function ",
"that ",
"reads ",
"an ",
"HTML ",
"file ",
"from ",
"the ",
"internet ",
"and ",
"",
"extracts ",
"the ",
"text ",
"content ",
"of ",
"all ",
"the ",
"",
"h1 ",
"",
"elements:\n",
"",
"```\n",
"",
"import ",
"requests\n",
"",
"from ",
"",
"bs4 ",
"import ",
"",
"",
"BeautifulSoup\n",
"\n",
"",
"def ",
"",
"",
"",
"",
"",
"",
"",
"",
"get_h1_text(url):\n",
" ",
" ",
"response ",
"= ",
"",
"",
"",
"",
"",
"requests.get(url)\n",
" ",
" ",
"soup ",
"= ",
"",
"",
"",
"",
"",
"",
"",
"BeautifulSoup(response.content, ",
"",
"",
"",
"",
"'html.parser')\n",
" ",
" ",
"",
"",
"",
"h1_elements ",
"= ",
"",
"",
"",
"",
"",
"",
"",
"",
"soup.find_all('h1')\n",
" ",
" ",
"",
"",
"",
"h1_text ",
"= ",
"[]\n",
" ",
" ",
"for ",
"",
"h1 ",
"in ",
"",
"",
"",
"",
"h1_elements:\n",
" ",
" ",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"h1_text.append(h1.text.strip())\n",
" ",
" ",
"return ",
"",
"",
"",
"h1_text\n",
"",
"```\n",
"",
"This ",
"function ",
"uses ",
"the ",
"",
"",
"`requests` ",
"library ",
"to ",
"make ",
"an ",
"HTTP ",
"GET ",
"request ",
"to ",
"the ",
"specified ",
"",
"URL, ",
"and ",
"the ",
"",
"",
"",
"",
"",
"",
"`BeautifulSoup` ",
"library ",
"to ",
"parse ",
"the ",
"HTML ",
"",
"response. ",
"The ",
"function ",
"then ",
"uses ",
"the ",
"",
"",
"",
"",
"`find_all()` ",
"method ",
"of ",
"the ",
"",
"",
"",
"",
"",
"",
"`BeautifulSoup` ",
"object ",
"to ",
"find ",
"all ",
"the ",
"",
"",
"",
"`h1` ",
"elements ",
"in ",
"the ",
"HTML ",
"",
"document, ",
"and ",
"",
"appends ",
"the ",
"text ",
"content ",
"of ",
"each ",
"",
"",
"",
"`h1` ",
"element ",
"to ",
"a ",
"list ",
"called ",
"",
"",
"",
"",
"",
"`h1_text`. ",
"",
"Finally, ",
"the ",
"function ",
"returns ",
"the ",
"",
"",
"",
"",
"",
"`h1_text` ",
"",
"list.\n",
"\n",
"",
"Here ",
"is ",
"an ",
"example ",
"of ",
"how ",
"you ",
"could ",
"use ",
"this ",
"",
"function:\n",
"",
"```\n",
"",
"url ",
"= ",
"",
"",
"",
"",
"",
"",
"",
"",
"'https://www.example.com'\n",
"",
"",
"",
"",
"h1_text ",
"= ",
"",
"",
"",
"",
"",
"",
"",
"",
"get_h1_text(url)\n",
"",
"",
"",
"",
"",
"",
"",
"print(h1_text)\n",
"",
"```\n",
"",
"This ",
"would ",
"print ",
"a ",
"list ",
"of ",
"all ",
"the ",
"text ",
"content ",
"of ",
"the ",
"",
"",
"",
"`h1` ",
"elements ",
"in ",
"the ",
"HTML ",
"document ",
"at ",
"the ",
"specified ",
"",
"",
"URL."
],
"data_removed": false,
"error": null,
"source": "web",
"status": "succeeded",
"created_at": "2023-09-28T08:48:38.698898Z",
"started_at": "2023-09-28T08:50:26.60617Z",
"completed_at": "2023-09-28T08:51:09.986787Z",
"urls": {
"cancel": "https://api.replicate.com/v1/predictions/s73gc7tbtjlvzljf4z5ld4kjoi/cancel",
"get": "https://api.replicate.com/v1/predictions/s73gc7tbtjlvzljf4z5ld4kjoi"
},
"metrics": {
"predict_time": 43.380617,
"total_time": 151.287889
}
}
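As the example prediction above shows, the "output" field is a list of string fragments rather than a single string. A minimal sketch (assuming the JSON above has already been parsed into a Python dict named `prediction`, a name chosen here for illustration) of reassembling the generated text:

```
# `prediction` is the parsed JSON prediction object shown above.
full_text = "".join(prediction["output"])
print(full_text)
```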