RyanMullins committed on
Commit d0eb7f5
1 Parent(s): fed0a26

Docs for the Space

Files changed (2)
  1. app.py +93 -13
  2. requirements.txt +1 -1
app.py CHANGED
@@ -145,7 +145,7 @@ with gr.Blocks() as demo:
  detectors, and the [SynthID Text documentation][raitk-synthid] for more on
  how this technology works.
 
- ## Getting started
+ ## Applying a watermark
 
  Practically speaking, SynthID Text is a logits processor, applied to your
  model's generation pipeline after [Top-K and Top-P][cloud-parameter-values],
@@ -169,10 +169,13 @@ with gr.Blocks() as demo:
  `.generate()`, as shown in the snippet below.
 
  ```python
- from transformers import AutoModelForCausalLM, AutoTokenizer
- from transformers.generation import SynthIDTextWatermarkingConfig
+ from transformers import (
+     AutoModelForCausalLM,
+     AutoTokenizer,
+     SynthIDTextWatermarkingConfig,
+ )
 
- # Standard model and toeknizer initialization
+ # Standard model and tokenizer initialization
  tokenizer = AutoTokenizer.from_pretrained('repo/id')
  model = AutoModelForCausalLM.from_pretrained('repo/id')
 
@@ -189,13 +192,20 @@ with gr.Blocks() as demo:
  watermarked_text = tokenizer.batch_decode(output_sequences)
  ```
 
- Enter up to three prompts then click the generate button. After you click,
- [Gemma 2B][gemma] will generate a watermarked and non-watermarked repsonses
- for each non-empty prompt.
+ ## Try it yourself
+
+ Let's use [Gemma 2B IT][gemma] to help you understand how watermarking works.
+
+ Using the text boxes below, enter up to three prompts, then click the generate
+ button. Some examples are provided to help get you started, but they are
+ fully editable.
+
+ Gemma will then generate watermarked and non-watermarked responses for each
+ non-empty prompt you provided.
 
  [cloud-parameter-values]: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/adjust-parameter-values
  [gemma]: https://huggingface.co/google/gemma-2b
- [raitk-synthid]: /responsible/docs/safeguards/synthid
+ [raitk-synthid]: https://ai.google.dev/responsible/docs/safeguards/synthid-text
  [synthid]: https://deepmind.google/technologies/synthid/
  [synthid-hf-config]: https://github.com/huggingface/transformers/blob/v4.46.0/src/transformers/generation/configuration_utils.py
  [synthid-hf-detector]: https://github.com/huggingface/transformers/blob/v4.46.0/src/transformers/generation/watermarking.py
@@ -213,7 +223,21 @@ with gr.Blocks() as demo:
  with gr.Column(visible=False) as generations_col:
  gr.Markdown(
  '''
- # SynthID: Tool
+ ## Human recognition of watermarked text
+
+ The primary goal of SynthID Text is to apply a watermark to generated text
+ without affecting generation quality. Another way to think about this is
+ that the watermark carried by generated text should be imperceptible to
+ you, the reader, but easily perceived by a watermark detector.
+
+ The responses from Gemma are shown below. Use the checkboxes to mark which
+ responses you think are watermarked, then click the "reveal" button to
+ see the true values.
+
+ The [research paper][synthid-nature] has an in-depth study examining human
+ perception of watermarked versus non-watermarked text.
+
+ [synthid-nature]: https://www.nature.com/articles/s41586-024-08025-4
  '''
  )
  generations_grp = gr.CheckboxGroup(
@@ -225,7 +249,23 @@ with gr.Blocks() as demo:
  with gr.Column(visible=False) as detections_col:
  gr.Markdown(
  '''
- # SynthID: Tool
+ ## Detecting watermarked text
+
+ The only way to reliably detect watermarked text is with a trained
+ classifier. This Space uses a pre-trained classifier hosted on the Hugging
+ Face Hub. For production use you will need to train your own classifiers to
+ recognize your watermarks. A [Bayesian detector][synthid-hf-detector] is
+ provided in Transformers, along with an
+ [end-to-end example][synthid-hf-detector-e2e] of how to train one of these
+ detectors.
+
+ You can see how your guesses compare to the actual results below. As
+ above, the responses are displayed in checkboxes. If the box is checked,
+ then the text carries a watermark. Your correct guesses are annotated with
+ the "Correct" prefix.
+
+ [synthid-hf-detector]: https://github.com/huggingface/transformers/blob/v4.46.0/src/transformers/generation/watermarking.py
+ [synthid-hf-detector-e2e]: https://github.com/huggingface/transformers/blob/v4.46.0/examples/research_projects/synthid_text/detector_bayesian.py
  '''
  )
  revealed_grp = gr.CheckboxGroup(
@@ -235,7 +275,26 @@ with gr.Blocks() as demo:
  'marked as correct or incorrect in the text.'
  ),
  )
- detect_btn = gr.Button('Detect', visible=False)
+ gr.Markdown(
+ '''
+ ## Limitations
+
+ SynthID Text watermarks are robust to some transformations, such as
+ cropping pieces of text, modifying a few words, or mild paraphrasing, but
+ this method does have limitations.
+
+ - Watermark application is less effective on factual responses, as there
+   is less opportunity to augment generation without decreasing accuracy.
+ - Detector confidence scores can be greatly reduced when an AI-generated
+   text is thoroughly rewritten or translated into another language.
+
+ SynthID Text is not built to directly stop motivated adversaries from
+ causing harm. However, it can make it harder to use AI-generated content
+ for malicious purposes, and it can be combined with other approaches to
+ give better coverage across content types and platforms.
+ '''
+ )
+ reset_btn = gr.Button('Reset', visible=False)
 
  def generate(*prompts):
  standard, standard_detector = generate_outputs(prompts=prompts)
@@ -295,7 +354,7 @@ with gr.Blocks() as demo:
  reveal_btn: gr.Button(visible=False),
  detections_col: gr.Column(visible=True),
  revealed_grp: gr.CheckboxGroup(choices=choices, value=value),
- detect_btn: gr.Button(visible=True),
+ reset_btn: gr.Button(visible=True),
  }
 
  reveal_btn.click(
@@ -305,7 +364,28 @@ with gr.Blocks() as demo:
  reveal_btn,
  detections_col,
  revealed_grp,
- detect_btn
+ reset_btn
+ ],
+ )
+
+ def reset():
+ return {
+ generations_col: gr.Column(visible=False),
+ detections_col: gr.Column(visible=False),
+ revealed_grp: gr.CheckboxGroup(visible=False),
+ reset_btn: gr.Button(visible=False),
+ generate_btn: gr.Button(visible=True),
+ }
+
+ reset_btn.click(
+ reset,
+ inputs=[],
+ outputs=[
+ generations_col,
+ detections_col,
+ revealed_grp,
+ reset_btn,
+ generate_btn,
+ ],
  )
 
 
requirements.txt CHANGED
@@ -1,6 +1,6 @@
  gradio
  spaces
- transformers>=4.46.0
+ transformers @ git+https://github.com/sumedhghaisas2/transformers_private
 
  --extra-index-url https://download.pytorch.org/whl/cu113
  torch
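The requirements change swaps the `transformers>=4.46.0` pin for a fork, presumably so the Space can pick up the SynthID Text utilities ahead of a stable release. Once those utilities are available, scoring text with a pre-trained Bayesian detector, as the new "Detecting watermarked text" section describes, might look roughly like this sketch; the detector repo id is a placeholder, and the `watermarking_config` and `model_name` attributes on the detector config follow the Transformers example linked from the docs.

```python
from transformers import (
    AutoTokenizer,
    BayesianDetectorModel,
    SynthIDTextWatermarkDetector,
    SynthIDTextWatermarkLogitsProcessor,
)

# Load a detector trained for a specific model and watermark key set.
# 'detector/repo-id' is a placeholder, mirroring 'repo/id' in the docs above.
detector_model = BayesianDetectorModel.from_pretrained('detector/repo-id')

# Rebuild the logits processor from the watermarking config the detector was
# trained against, so the same watermark signals are computed at detection time.
logits_processor = SynthIDTextWatermarkLogitsProcessor(
    **detector_model.config.watermarking_config,
    device='cpu',
)
tokenizer = AutoTokenizer.from_pretrained(detector_model.config.model_name)
detector = SynthIDTextWatermarkDetector(detector_model, logits_processor, tokenizer)

# Score candidate text; higher scores mean the watermark is more likely present.
candidate = tokenizer(['Text to check for a watermark.'], return_tensors='pt')
scores = detector(candidate.input_ids)
print(scores)
```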