Fix setting of model_file_name (#1114)
* Fix setting of model_file_name

* Add CLIP-like unit test for image feature extraction pipeline
xenova authored Dec 25, 2024
1 parent da2c1e9 commit 9056f76
Showing 2 changed files with 101 additions and 57 deletions.
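
The fix itself comes down to JavaScript object-literal semantics: when two properties in a literal share a key, the later one wins, and spreading options copies the model_file_name key even when its value is undefined (for example, when a caller forwards an options object that lists every supported key). A minimal standalone sketch of the failure mode and the fix; this is an illustration, not code from this repository:

// Simulates a caller that forwards an options object in which the
// model_file_name key exists but was never set.
const options = { model_file_name: undefined };

// Before: the spread comes last, so the undefined value clobbers the default.
const before = { model_file_name: 'text_model', ...options };
console.log(before.model_file_name); // undefined

// After: spread first, then fall back to the default only when the caller
// supplied no usable value (?? treats null and undefined as "not provided").
const after = { ...options, model_file_name: options.model_file_name ?? 'text_model' };
console.log(after.model_file_name); // 'text_model'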
42 changes: 21 additions & 21 deletions src/models.js
@@ -3421,7 +3421,7 @@ export class MoonshinePreTrainedModel extends PreTrainedModel {
  */
 export class MoonshineModel extends MoonshinePreTrainedModel { }

-export class MoonshineForConditionalGeneration extends MoonshinePreTrainedModel { }
+export class MoonshineForConditionalGeneration extends MoonshinePreTrainedModel { }
 //////////////////////////////////////////////////


@@ -3821,9 +3821,9 @@ export class CLIPTextModel extends CLIPPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
         return super.from_pretrained(pretrained_model_name_or_path, {
-            // Update default model file name if not provided
-            model_file_name: 'text_model',
             ...options,
+            // Update default model file name if not provided
+            model_file_name: options.model_file_name ?? 'text_model',
         });
     }
 }
@@ -3858,9 +3858,9 @@ export class CLIPTextModelWithProjection extends CLIPPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
         return super.from_pretrained(pretrained_model_name_or_path, {
-            // Update default model file name if not provided
-            model_file_name: 'text_model',
             ...options,
+            // Update default model file name if not provided
+            model_file_name: options.model_file_name ?? 'text_model',
         });
     }
 }
@@ -3872,9 +3872,9 @@ export class CLIPVisionModel extends CLIPPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
         return super.from_pretrained(pretrained_model_name_or_path, {
-            // Update default model file name if not provided
-            model_file_name: 'vision_model',
             ...options,
+            // Update default model file name if not provided
+            model_file_name: options.model_file_name ?? 'vision_model',
         });
     }
 }
@@ -3909,9 +3909,9 @@ export class CLIPVisionModelWithProjection extends CLIPPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
         return super.from_pretrained(pretrained_model_name_or_path, {
-            // Update default model file name if not provided
-            model_file_name: 'vision_model',
             ...options,
+            // Update default model file name if not provided
+            model_file_name: options.model_file_name ?? 'vision_model',
         });
     }
 }
@@ -3997,9 +3997,9 @@ export class SiglipTextModel extends SiglipPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
         return super.from_pretrained(pretrained_model_name_or_path, {
-            // Update default model file name if not provided
-            model_file_name: 'text_model',
             ...options,
+            // Update default model file name if not provided
+            model_file_name: options.model_file_name ?? 'text_model',
         });
     }
 }
@@ -4034,9 +4034,9 @@ export class SiglipVisionModel extends CLIPPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
         return super.from_pretrained(pretrained_model_name_or_path, {
-            // Update default model file name if not provided
-            model_file_name: 'vision_model',
             ...options,
+            // Update default model file name if not provided
+            model_file_name: options.model_file_name ?? 'vision_model',
         });
     }
 }
@@ -4093,9 +4093,9 @@ export class JinaCLIPTextModel extends JinaCLIPPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
         return super.from_pretrained(pretrained_model_name_or_path, {
-            // Update default model file name if not provided
-            model_file_name: 'text_model',
             ...options,
+            // Update default model file name if not provided
+            model_file_name: options.model_file_name ?? 'text_model',
         });
     }
 }
@@ -4104,9 +4104,9 @@ export class JinaCLIPVisionModel extends JinaCLIPPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
         return super.from_pretrained(pretrained_model_name_or_path, {
-            // Update default model file name if not provided
-            model_file_name: 'vision_model',
             ...options,
+            // Update default model file name if not provided
+            model_file_name: options.model_file_name ?? 'vision_model',
         });
     }
 }
@@ -6338,9 +6338,9 @@ export class ClapTextModelWithProjection extends ClapPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
         return super.from_pretrained(pretrained_model_name_or_path, {
-            // Update default model file name if not provided
-            model_file_name: 'text_model',
             ...options,
+            // Update default model file name if not provided
+            model_file_name: options.model_file_name ?? 'text_model',
         });
     }
 }
@@ -6375,9 +6375,9 @@ export class ClapAudioModelWithProjection extends ClapPreTrainedModel {
     /** @type {typeof PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
         return super.from_pretrained(pretrained_model_name_or_path, {
-            // Update default model file name if not provided
-            model_file_name: 'audio_model',
             ...options,
+            // Update default model file name if not provided
+            model_file_name: options.model_file_name ?? 'audio_model',
         });
     }
 }
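With the new ordering, an explicit caller value still wins, while a missing, null, or undefined one falls back to the class default. A usage sketch, assuming the @huggingface/transformers package name and a checkpoint that ships separate per-tower ONNX files; the model ID and override file name below are illustrative assumptions, not taken from this commit:

import { CLIPVisionModelWithProjection } from '@huggingface/transformers';

// No model_file_name given: the class default 'vision_model' is applied.
const vision = await CLIPVisionModelWithProjection.from_pretrained('Xenova/clip-vit-base-patch32');

// An explicit value still overrides the default, exactly as before the fix.
const custom = await CLIPVisionModelWithProjection.from_pretrained('Xenova/clip-vit-base-patch32', {
    model_file_name: 'my_vision_model', // hypothetical alternative file name
});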
116 changes: 80 additions & 36 deletions tests/pipelines/test_pipelines_image_feature_extraction.js
@@ -7,45 +7,89 @@ const PIPELINE_ID = "image-feature-extraction";

 export default () => {
   describe("Image Feature Extraction", () => {
-    const model_id = "hf-internal-testing/tiny-random-ViTMAEModel";
-    /** @type {ImageFeatureExtractionPipeline} */
-    let pipe;
-    let images;
-    beforeAll(async () => {
-      pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
-      images = await Promise.all([load_cached_image("white_image"), load_cached_image("blue_image")]);
-    }, MAX_MODEL_LOAD_TIME);
+    describe("Default", () => {
+      const model_id = "hf-internal-testing/tiny-random-ViTMAEModel";
+      /** @type {ImageFeatureExtractionPipeline} */
+      let pipe;
+      let images;
+      beforeAll(async () => {
+        pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
+        images = await Promise.all([load_cached_image("white_image"), load_cached_image("blue_image")]);
+      }, MAX_MODEL_LOAD_TIME);

-    it("should be an instance of ImageFeatureExtractionPipeline", () => {
-      expect(pipe).toBeInstanceOf(ImageFeatureExtractionPipeline);
-    });
+      it("should be an instance of ImageFeatureExtractionPipeline", () => {
+        expect(pipe).toBeInstanceOf(ImageFeatureExtractionPipeline);
+      });

-    describe("batch_size=1", () => {
-      it(
-        "default",
-        async () => {
-          const output = await pipe(images[0]);
-          expect(output.dims).toEqual([1, 91, 32]);
-          expect(output.mean().item()).toBeCloseTo(-8.507473614471905e-10, 6);
-        },
-        MAX_TEST_EXECUTION_TIME,
-      );
-    });
+      describe("batch_size=1", () => {
+        it(
+          "default",
+          async () => {
+            const output = await pipe(images[0]);
+            expect(output.dims).toEqual([1, 91, 32]);
+            expect(output.mean().item()).toBeCloseTo(-8.507473614471905e-10, 6);
+          },
+          MAX_TEST_EXECUTION_TIME,
+        );
+      });

-    describe("batch_size>1", () => {
-      it(
-        "default",
-        async () => {
-          const output = await pipe(images);
-          expect(output.dims).toEqual([images.length, 91, 32]);
-          expect(output.mean().item()).toBeCloseTo(-5.997602414709036e-10, 6);
-        },
-        MAX_TEST_EXECUTION_TIME,
-      );
-    });
+      describe("batch_size>1", () => {
+        it(
+          "default",
+          async () => {
+            const output = await pipe(images);
+            expect(output.dims).toEqual([images.length, 91, 32]);
+            expect(output.mean().item()).toBeCloseTo(-5.997602414709036e-10, 6);
+          },
+          MAX_TEST_EXECUTION_TIME,
+        );
+      });

-    afterAll(async () => {
-      await pipe.dispose();
-    }, MAX_MODEL_DISPOSE_TIME);
+      afterAll(async () => {
+        await pipe.dispose();
+      }, MAX_MODEL_DISPOSE_TIME);
+    });
+    describe("CLIP-like", () => {
+      const model_id = "hf-internal-testing/tiny-random-CLIPModel";
+      /** @type {ImageFeatureExtractionPipeline} */
+      let pipe;
+      let images;
+      beforeAll(async () => {
+        pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
+        images = await Promise.all([load_cached_image("white_image"), load_cached_image("blue_image")]);
+      }, MAX_MODEL_LOAD_TIME);
+
+      it("should be an instance of ImageFeatureExtractionPipeline", () => {
+        expect(pipe).toBeInstanceOf(ImageFeatureExtractionPipeline);
+      });
+
+      describe("batch_size=1", () => {
+        it(
+          "default",
+          async () => {
+            const output = await pipe(images[0]);
+            expect(output.dims).toEqual([1, 64]);
+            expect(output.mean().item()).toBeCloseTo(-0.11340035498142242, 6);
+          },
+          MAX_TEST_EXECUTION_TIME,
+        );
+      });
+
+      describe("batch_size>1", () => {
+        it(
+          "default",
+          async () => {
+            const output = await pipe(images);
+            expect(output.dims).toEqual([images.length, 64]);
+            expect(output.mean().item()).toBeCloseTo(-0.11006651818752289, 6);
+          },
+          MAX_TEST_EXECUTION_TIME,
+        );
+      });
+
+      afterAll(async () => {
+        await pipe.dispose();
+      }, MAX_MODEL_DISPOSE_TIME);
+    });
   });
 };
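
The new CLIP-like suite exercises the path this commit unblocks: the image-feature-extraction pipeline forwards its options to the underlying vision model, and before the fix the forwarded undefined model_file_name could prevent CLIP-style checkpoints from resolving their default ONNX file. A usage sketch with a full-size checkpoint; the model ID, image URL, and printed shape are assumptions for illustration (the tiny test model above yields [1, 64]):

import { pipeline } from '@huggingface/transformers';

// CLIP-like checkpoints now load in this pipeline because the vision tower's
// default file name survives the forwarded options.
const extractor = await pipeline('image-feature-extraction', 'Xenova/clip-vit-base-patch32');
const features = await extractor('https://example.com/cat.jpg');
console.log(features.dims); // e.g. [1, 512] for this checkpoint's projection size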
