sebasgar committed on
Commit
a137b29
·
verified ·
1 Parent(s): 0a1b9a3

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +25 -19
README.md CHANGED
@@ -106,35 +106,41 @@ Model
106
  Garden](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/hear).
107
 
108
  ```python
 
 
 
 
 
 
 
109
 
110
- ! git clone https://github.com/Google-Health/hear.git
111
- ! pip install --upgrade --quiet transformers==4.50.3
112
-
 
 
 
113
 
114
- import torch
115
- from transformers import AutoModel
 
 
116
 
117
  from huggingface_hub.utils import HfFolder
118
- from huggingface_hub import notebook_login, from_pretrained_keras, notebook_login
119
  if HfFolder.get_token() is None:
120
  notebook_login()
121
 
122
- import importlib
123
- audio_utils = importlib.import_module(
124
- "hear.python.data_processing.audio_utils"
125
- )
126
- preprocess_audio = audio_utils.preprocess_audio
127
 
128
- model = AutoModel.from_pretrained("google/hear-pytorch")
 
 
 
 
129
 
130
- # Generate 4 Examples of two-second random audio clips
131
  raw_audio_batch = torch.rand((4, 32000), dtype=torch.float32)
132
- spectrogram_batch = preprocess_audio(raw_audio_batch)
133
-
134
- # Perform Inference to obtain HeAR embeddings
135
- # There are 4 embeddings each with length 512 corresponding to the 4 inputs
136
- embedding_batch = model.forward(
137
- spectrogram_batch, return_dict=True, output_hidden_states=True)
138
  ```
139
 
140
  ### Examples
 
106
  Garden](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/hear).
107
 
108
  ```python
109
+ import torch
110
+ from fcv_detector.models.hear import (
111
+ HearConfig,
112
+ HearModel,
113
+ HearForAudioClassification,
114
+ HearFeatureExtractor
115
+ )
116
 
117
+ from transformers import (
118
+ AutoConfig,
119
+ AutoModel,
120
+ AutoModelForAudioClassification,
121
+ AutoFeatureExtractor,
122
+ )
123
 
124
+ AutoConfig.register("hear", HearConfig)
125
+ AutoModel.register(HearConfig, HearModel)
126
+ AutoModelForAudioClassification.register(HearConfig, HearForAudioClassification)
127
+ AutoFeatureExtractor.register(HearConfig, HearFeatureExtractor)
128
 
129
  from huggingface_hub.utils import HfFolder
130
+ from huggingface_hub import notebook_login
131
  if HfFolder.get_token() is None:
132
  notebook_login()
133
 
 
 
 
 
 
134
 
135
+ model_id = "audiblehealthai/hear-pytorch"
136
+ model = HearModel.from_pretrained(model_id)
137
+ fe = HearFeatureExtractor.from_pretrained(
138
+ model_id
139
+ )
140
 
 
141
  raw_audio_batch = torch.rand((4, 32000), dtype=torch.float32)
142
+ inputs = fe(raw_audio_batch, return_tensors="pt")
143
+ output = model(**inputs)
 
 
 
 
144
  ```
145
 
146
  ### Examples