technillogue/cog-triton-webrtc
        
          Run technillogue/cog-triton-webrtc with an API
Use one of our client libraries to get started quickly. Clicking on a library will take you to the Playground tab where you can tweak different inputs, see the results, and copy the corresponding code to use in your own project.
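For example, here is a minimal sketch using the Replicate Python client. The prompt text and input values are illustrative only, and depending on your client version you may need to append the model's version hash to the model name:

```python
# pip install replicate
# Assumes the REPLICATE_API_TOKEN environment variable is set.
import replicate

output = replicate.run(
    "technillogue/cog-triton-webrtc",
    input={
        "prompt": "Write a haiku about WebRTC.",  # the only required field
        "max_new_tokens": 250,                    # matches the default below
    },
)

# The model streams its output as chunks of text, so join them.
print("".join(output))
```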
            Input schema
          
The fields you can use to run this model with an API. If you don't give a value for a field, its default value will be used.
| Field | Type | Default value | Description | 
|---|---|---|---|
| prompt | string | None | |
| system_prompt | string | None | |
| max_new_tokens | integer | 250 | |
| min_length | integer | None | |
| top_k | integer | 0 | |
| top_p | number | 0 | |
| temperature | number | 1 | |
| length_penalty | number | 1 | |
| presence_penalty | number | 0 | |
| stop_words | string | None | |
| prompt_template | string | None | |
{
  "type": "object",
  "title": "Input",
  "required": [
    "prompt"
  ],
  "properties": {
    "top_k": {
      "type": "integer",
      "title": "Top K",
      "default": 0,
      "x-order": 4
    },
    "top_p": {
      "type": "number",
      "title": "Top P",
      "default": 0,
      "x-order": 5
    },
    "prompt": {
      "type": "string",
      "title": "Prompt",
      "x-order": 0
    },
    "min_length": {
      "type": "integer",
      "title": "Min Length",
      "x-order": 3
    },
    "stop_words": {
      "type": "string",
      "title": "Stop Words",
      "x-order": 9
    },
    "temperature": {
      "type": "number",
      "title": "Temperature",
      "default": 1,
      "x-order": 6
    },
    "system_prompt": {
      "type": "string",
      "title": "System Prompt",
      "x-order": 1
    },
    "length_penalty": {
      "type": "number",
      "title": "Length Penalty",
      "default": 1,
      "x-order": 7
    },
    "max_new_tokens": {
      "type": "integer",
      "title": "Max New Tokens",
      "default": 250,
      "x-order": 2
    },
    "prompt_template": {
      "type": "string",
      "title": "Prompt Template",
      "x-order": 10
    },
    "presence_penalty": {
      "type": "number",
      "title": "Presence Penalty",
      "default": 0,
      "x-order": 8
    }
  }
}
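You can also create a prediction against this schema with the raw HTTP API. The sketch below assumes the `requests` library and uses `<version>` as a placeholder for this model's version hash, which isn't shown on this page:

```python
import os
import requests

resp = requests.post(
    "https://api.replicate.com/v1/predictions",
    headers={
        "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
        "Content-Type": "application/json",
    },
    json={
        "version": "<version>",  # placeholder: this model's version hash
        # Only "prompt" is required; omitted fields fall back to the
        # defaults in the schema above (e.g. max_new_tokens=250, temperature=1).
        "input": {"prompt": "Write a haiku about WebRTC."},
    },
)
resp.raise_for_status()
prediction = resp.json()
print(prediction["id"], prediction["status"])
```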
            Output schema
          
        The shape of the response you’ll get when you run this model with an API.
{
  "type": "array",
  "items": {
    "type": "string"
  },
  "title": "Output",
  "x-cog-array-type": "iterator",
  "x-cog-array-display": "concatenate"
}
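Because the output is an iterator of string chunks meant to be concatenated, a completed prediction's `output` field is a list of strings. Here is a sketch of polling and reassembling it, continuing from the HTTP example above (the polling loop and field names follow the standard predictions API and are not specific to this model):

```python
import os
import time
import requests

headers = {"Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}"}

# `prediction` is the JSON returned when the prediction was created above.
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(1)
    prediction = requests.get(prediction["urls"]["get"], headers=headers).json()

if prediction["status"] == "succeeded":
    # Concatenate the streamed chunks to recover the full generated text.
    print("".join(prediction["output"]))
```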