AI on demand: MinerU: Difference between revisions

From MediaWiki
Jump to navigation Jump to search
No edit summary
 
Line 1: Line 1:
== Calling the model ==
== Calling the model ==
<syntaxhighlight lang="bash">
<syntaxhighlight lang="bash">
# Set your personal key
# Set your personal key:
STONEY_KEY=sk-...
STONEY_KEY=sk-...


# Set the desired model
# Set the desired model:
MODEL_ID=MinerU2.5-2509-1.2B
MODEL_ID=MinerU2.5-2509-1.2B
# Set your prompt:
PROMPT='Describe an imaginary document.'
# Set the maximum number of tokens:
MAX_TOKENS=2000


curl -s https://llm.stoney-cloud.com/v1/chat/completions \
curl -s https://llm.stoney-cloud.com/v1/chat/completions \
Line 13: Line 19:
     "model": "'"$MODEL_ID"'",
     "model": "'"$MODEL_ID"'",
     "messages": [
     "messages": [
       {"role": "user", "content": "Describe an imaginary document."}
       {"role": "user", "content": "'"$PROMPT"'"}
     ],
     ],
     "max_tokens": 2000
     "max_tokens": '"$MAX_TOKENS"'
   }' \
   }' \
   | jq .
   | jq .

Latest revision as of 16:59, 2 April 2026

Calling the model

# Set your personal key:
STONEY_KEY=sk-...

# Set the desired model:
MODEL_ID=MinerU2.5-2509-1.2B

# Set your prompt:
PROMPT='Describe an imaginary document.'

# Set the maximum number of tokens:
MAX_TOKENS=2000

curl -s https://llm.stoney-cloud.com/v1/chat/completions \
  -H "Authorization: Bearer $STONEY_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "'"$MODEL_ID"'",
    "messages": [
      {"role": "user", "content": "'"$PROMPT"'"}
    ],
    "max_tokens": '"$MAX_TOKENS"'
  }' \
  | jq .

Example output:

{
  "id": "chatcmpl-8e804087bd0f6e64",
  "object": "chat.completion",
  "created": 1774862972,
  "model": "MinerU2.5-2509-1.2B",
  "choices": [
    {
      "index": 0,
      "message": {
        "role": "assistant",
        "content": "Describe an imaginary document.",
        "refusal": null,
        "annotations": null,
        "audio": null,
        "function_call": null,
        "tool_calls": [],
        "reasoning": null,
        "reasoning_content": null
      },
      "logprobs": null,
      "finish_reason": "stop",
      "stop_reason": null,
      "token_ids": null
    }
  ],
  "service_tier": null,
  "system_fingerprint": null,
  "usage": {
    "prompt_tokens": 24,
    "total_tokens": 30,
    "completion_tokens": 6,
    "prompt_tokens_details": null
  },
  "prompt_logprobs": null,
  "prompt_token_ids": null,
  "kv_transfer_params": null
}