from safetensors import safe_open


def inspect_safetensors(path: str) -> None:
    """Print the name, dtype, and shape of every tensor in a safetensors file.

    Args:
        path: Filesystem path to a ``.safetensors`` checkpoint file.

    Opens the file lazily with ``safe_open`` (framework="pt", so tensors come
    back as torch tensors) and prints one dtype line and one shape line per key.
    """
    with safe_open(path, framework="pt") as f:
        # keys() returns the full list of tensor names stored in the file.
        tensor_names = f.keys()
        print(f"tensor list {tensor_names}")
        for key in tensor_names:
            tensor = f.get_tensor(key)
            print(f"tensor name {key} dtype: {tensor.dtype}")
            print(f"tensor name {key} shape: {tensor.shape}")


if __name__ == "__main__":
    # Original line had no right-hand side (syntax error); set a real path here.
    inspect_safetensors("model.safetensors")  # TODO: point at the actual file
tensor list ['thinker.model.layers.5.mlp.gate_proj.weight', 'thinker.model.layers.5.mlp.up_proj.weight', 'thinker.model.layers.5.post_attention_layernorm.weight', 'thinker.model.layers.5.self_attn.k_norm.weight', 'thinker.model.layers.5.self_attn.k_proj.weight', 'thinker.model.layers.5.self_attn.o_proj.weight', 'thinker.model.layers.5.self_attn.q_norm.weight', 'thinker.model.layers.5.self_attn.q_proj.weight', 'thinker.model.layers.5.self_attn.v_proj.weight', 'thinker.model.layers.6.input_layernorm.weight', 'thinker.model.layers.6.mlp.down_proj.weight', 'thinker.model.layers.6.mlp.gate_proj.weight', 'thinker.model.layers.6.mlp.up_proj.weight', 'thinker.model.layers.6.post_attention_layernorm.weight', 'thinker.model.layers.6.self_attn.k_norm.weight', 'thinker.model.layers.6.self_attn.k_proj.weight', 'thinker.model.layers.6.self_attn.o_proj.weight', 'thinker.model.layers.6.self_attn.q_norm.weight', 'thinker.model.layers.6.self_attn.q_proj.weight', 'thinker.model.layers.6.self_attn.v_proj.weight', 'thinker.model.layers.7.input_layernorm.weight', 'thinker.model.layers.7.mlp.down_proj.weight', 'thinker.model.layers.7.mlp.gate_proj.weight', 'thinker.model.layers.7.mlp.up_proj.weight', 'thinker.model.layers.7.post_attention_layernorm.weight', 'thinker.model.layers.7.self_attn.k_norm.weight', 'thinker.model.layers.7.self_attn.k_proj.weight', 'thinker.model.layers.7.self_attn.o_proj.weight', 'thinker.model.layers.7.self_attn.q_norm.weight', 'thinker.model.layers.7.self_attn.q_proj.weight', 'thinker.model.layers.7.self_attn.v_proj.weight', 'thinker.model.layers.8.input_layernorm.weight', 'thinker.model.layers.8.mlp.down_proj.weight', 'thinker.model.layers.8.mlp.gate_proj.weight', 'thinker.model.layers.8.mlp.up_proj.weight', 'thinker.model.layers.8.post_attention_layernorm.weight', 'thinker.model.layers.8.self_attn.k_norm.weight', 'thinker.model.layers.8.self_attn.k_proj.weight', 'thinker.model.layers.8.self_attn.o_proj.weight', 
'thinker.model.layers.8.self_attn.q_norm.weight', 'thinker.model.layers.8.self_attn.q_proj.weight', 'thinker.model.layers.8.self_attn.v_proj.weight', 'thinker.model.layers.9.input_layernorm.weight', 'thinker.model.layers.9.mlp.down_proj.weight', 'thinker.model.layers.9.mlp.gate_proj.weight', 'thinker.model.layers.9.mlp.up_proj.weight', 'thinker.model.layers.9.post_attention_layernorm.weight', 'thinker.model.layers.9.self_attn.k_norm.weight', 'thinker.model.layers.9.self_attn.k_proj.weight', 'thinker.model.layers.9.self_attn.o_proj.weight', 'thinker.model.layers.9.self_attn.q_norm.weight', 'thinker.model.layers.9.self_attn.q_proj.weight', 'thinker.model.layers.9.self_attn.v_proj.weight', 'thinker.model.norm.weight']
tensor name thinker.model.layers.5.mlp.gate_proj.weight ์ ๋ฐ์ดํฐํ์ : torch.bfloat16
tensor name thinker.model.layers.5.mlp.gate_proj.weight ์ shape : torch.Size([6144, 2048])
tensor name thinker.model.layers.5.mlp.up_proj.weight ์ ๋ฐ์ดํฐํ์ : torch.bfloat16
tensor name thinker.model.layers.5.mlp.up_proj.weight ์ shape : torch.Size([6144, 2048])
tensor name thinker.model.layers.5.post_attention_layernorm.weight ์ ๋ฐ์ดํฐํ์ : torch.bfloat16
tensor name thinker.model.layers.5.post_attention_layernorm.weight ์ shape : torch.Size([2048])
tensor name thinker.model.layers.5.self_attn.k_norm.weight ์ ๋ฐ์ดํฐํ์ : torch.bfloat16
tensor name thinker.model.layers.5.self_attn.k_norm.weight ์ shape : torch.Size([128])
tensor name thinker.model.layers.5.self_attn.k_proj.weight ์ ๋ฐ์ดํฐํ์ : torch.bfloat16
tensor name thinker.model.layers.5.self_attn.k_proj.weight ์ shape : torch.Size([1024, 2048])
tensor name thinker.model.layers.5.self_attn.o_proj.weight ์ ๋ฐ์ดํฐํ์ : torch.bfloat16
tensor name thinker.model.layers.5.self_attn.o_proj.weight ์ shape : torch.Size([2048, 2048])
tensor name thinker.model.layers.5.self_attn.q_norm.weight ์ ๋ฐ์ดํฐํ์ : torch.bfloat16
tensor name thinker.model.layers.5.self_attn.q_norm.weight ์ shape : torch.Size([128])
tensor name thinker.model.layers.5.self_attn.q_proj.weight ์ ๋ฐ์ดํฐํ์ : torch.bfloat16
tensor name thinker.model.layers.5.self_attn.q_proj.weight ์ shape : torch.Size([2048, 2048])
tensor name thinker.model.layers.5.self_attn.v_proj.weight ์ ๋ฐ์ดํฐํ์ : torch.bfloat16
tensor name thinker.model.layers.5.self_attn.v_proj.weight ์ shape : torch.Size([1024, 2048])
tensor name thinker.model.layers.6.input_layernorm.weight ์ ๋ฐ์ดํฐํ์ : torch.bfloat16
tensor name thinker.model.layers.6.input_layernorm.weight ์ shape : torch.Size([2048])
tensor name thinker.model.layers.6.mlp.down_proj.weight ์ ๋ฐ์ดํฐํ์ : torch.bfloat16
tensor name thinker.model.layers.6.mlp.down_proj.weight ์ shape : torch.Size([2048, 6144])
tensor name thinker.model.layers.6.mlp.gate_proj.weight ์ ๋ฐ์ดํฐํ์ : torch.bfloat16
tensor name thinker.model.layers.6.mlp.gate_proj.weight ์ shape : torch.Size([6144, 2048])
tensor name thinker.model.layers.6.mlp.up_proj.weight ์ ๋ฐ์ดํฐํ์ : torch.bfloat16
tensor name thinker.model.layers.6.mlp.up_proj.weight ์ shape : torch.Size([6144, 2048])
tensor name thinker.model.layers.6.post_attention_layernorm.weight ์ ๋ฐ์ดํฐํ์ : torch.bfloat16
@dataclass(init=False)
class ToolStrategy(Generic[SchemaT]):
    """Use a tool calling strategy for model responses."""

    # Schema for the tool calls.
    schema: type[SchemaT]

    # Schema specs for the tool calls.
    schema_specs: list[_SchemaSpec[SchemaT]]

    # Content of the tool message returned when the model calls an
    # artificial structured output tool (may be None).
    tool_message_content: str | None

    # Error-handling policy for tool calls: a boolean flag, a fixed message
    # string, exception type(s), or a callback mapping an exception to a
    # message string.
    handle_errors: (
        bool
        | str
        | type[Exception]
        | tuple[type[Exception], ...]
        | Callable[[Exception], str]
    )
# NOTE(review): fragment of a larger `if`/`elif` dispatch over `method` inside
# a structured-output helper; the enclosing `def` and the earlier branches are
# outside this chunk.
elif method == "json_schema":
    # A concrete schema is mandatory for every method except "json_mode".
    if schema is None:
        msg = (
            "schema must be specified when method is not 'json_mode'. "
            "Received None."
        )
        raise ValueError(msg)
    if is_pydantic_schema:
        schema = cast("TypeBaseModel", schema)
        # Pydantic v1 models expose .schema(); v2 renamed it to
        # model_json_schema().
        if issubclass(schema, BaseModelV1):
            response_format = schema.schema()
        else:
            response_format = schema.model_json_schema()
    # NOTE(review): if is_pydantic_schema is falsy, `response_format` is never
    # assigned in this visible span — presumably handled before this fragment,
    # otherwise this raises NameError; confirm against the full method.
    # Bind the JSON schema as the `format` kwarg so the backend constrains its
    # output; ls_structured_output_format looks like tracing metadata — confirm.
    llm = self.bind(
        format=response_format,
        ls_structured_output_format={
            "kwargs": {"method": method},
            "schema": schema,
        },
    )
    output_parser = PydanticOutputParser(pydantic_object=schema)  # type: ignore[arg-type]
# Example: using bind() to fix keyword arguments (e.g. stop tokens) on a model before invocation.
"""
Example:
```python
from langchain_ollama import ChatOllama
from langchain_core.output_parsers import StrOutputParser
model = ChatOllama(model="llama3.1")
# Without bind
chain = model | StrOutputParser()
chain.invoke("Repeat quoted words exactly: 'One two three four five.'")
# Output is 'One two three four five.'
# With bind
chain = model.bind(stop=["three"]) | StrOutputParser()
chain.invoke("Repeat quoted words exactly: 'One two three four five.'")
# Output is 'One two'
"""
@dataclass(init=False)
class ProviderStrategy(Generic[SchemaT]):
    """Use the model provider's native structured output method."""

    # Schema for native mode.
    schema: type[SchemaT]

    # Schema spec for native mode.
    schema_spec: _SchemaSpec[SchemaT]
import sys
n,m = map(int,input().split(' '))
pocketmon_list = dict()
rev_poecketmon_list = dict()
cnt=1
for i in range(0,n):
name = sys.stdin.readline().strip()
pocketmon_list[str(cnt)] = name
rev_poecketmon_list[name] = str(cnt)
cnt+=1
for i in range(0,m):
tmp_input = sys.stdin.readline().strip()
if tmp_input.isdigit():
print(pocketmon_list[tmp_input])
else:
print(rev_poecketmon_list[tmp_input])