/*
 * Copyright 2016-17 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "vas: " fmt

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/rcupdate.h>
#include <linux/cred.h>
#include <asm/switch_to.h>
#include <asm/ppc-opcode.h>
#include "vas.h"
#include "copy-paste.h"

/*
 * Compute the paste address region for the window @window using the
 * ->paste_base_addr and ->paste_win_id_shift we got from the device tree.
 */
static void compute_paste_address(struct vas_window *window, u64 *addr,
					int *len)
{
	int winid;
	u64 base, shift;

	base = window->vinst->paste_base_addr;
	shift = window->vinst->paste_win_id_shift;
	winid = window->winid;

	*addr = base + (winid << shift);
	if (len)
		*len = PAGE_SIZE;

	pr_debug("Txwin #%d: Paste addr 0x%llx\n", winid, *addr);
}
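
/*
 * Worked example (hypothetical values, not taken from a real device tree):
 * with paste_base_addr = 0x600000000000 and paste_win_id_shift = 16,
 * window id 5 gets the paste address
 * 0x600000000000 + (5 << 16) = 0x600000050000, and the paste region is
 * one PAGE_SIZE long.
 */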

u64 vas_win_paste_addr(struct vas_window *win)
{
	u64 addr;

	compute_paste_address(win, &addr, NULL);

	return addr;
}
EXPORT_SYMBOL(vas_win_paste_addr);

static inline void get_hvwc_mmio_bar(struct vas_window *window,
			u64 *start, int *len)
{
	u64 pbaddr;

	pbaddr = window->vinst->hvwc_bar_start;
	*start = pbaddr + window->winid * VAS_HVWC_SIZE;
	*len = VAS_HVWC_SIZE;
}

static inline void get_uwc_mmio_bar(struct vas_window *window,
			u64 *start, int *len)
{
	u64 pbaddr;

	pbaddr = window->vinst->uwc_bar_start;
	*start = pbaddr + window->winid * VAS_UWC_SIZE;
	*len = VAS_UWC_SIZE;
}

/*
 * Map the paste bus address of the given send window into kernel address
 * space. Unlike the MMIO regions (see map_mmio_region() below), the paste
 * region must be mapped cacheable and applies only to send windows.
 */
static void *map_paste_region(struct vas_window *txwin)
{
	int len;
	void *map;
	char *name;
	u64 start;

	name = kasprintf(GFP_KERNEL, "window-v%d-w%d", txwin->vinst->vas_id,
				txwin->winid);
	if (!name)
		goto free_name;

	txwin->paste_addr_name = name;
	compute_paste_address(txwin, &start, &len);

	if (!request_mem_region(start, len, name)) {
		pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n",
				__func__, start, len);
		goto free_name;
	}

	map = ioremap_cache(start, len);
	if (!map) {
		pr_devel("%s(): ioremap_cache(0x%llx, %d) failed\n", __func__,
				start, len);
		release_mem_region(start, len);
		goto free_name;
	}

	pr_devel("Mapped paste addr 0x%llx to kaddr 0x%p\n", start, map);
	return map;

free_name:
	kfree(name);
	return ERR_PTR(-ENOMEM);
}

static void *map_mmio_region(char *name, u64 start, int len)
{
	void *map;

	if (!request_mem_region(start, len, name)) {
		pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n",
				__func__, start, len);
		return NULL;
	}

	map = ioremap(start, len);
	if (!map) {
		pr_devel("%s(): ioremap(0x%llx, %d) failed\n", __func__, start,
				len);
		release_mem_region(start, len);
		return NULL;
	}

	return map;
}

static void unmap_region(void *addr, u64 start, int len)
{
	iounmap(addr);
	release_mem_region((phys_addr_t)start, len);
}

/*
 * Unmap the paste address region for a window.
 */
static void unmap_paste_region(struct vas_window *window)
{
	int len;
	u64 busaddr_start;

	if (window->paste_kaddr) {
		compute_paste_address(window, &busaddr_start, &len);
		unmap_region(window->paste_kaddr, busaddr_start, len);
		window->paste_kaddr = NULL;
		kfree(window->paste_addr_name);
		window->paste_addr_name = NULL;
	}
}

/*
 * Unmap the MMIO regions for a window. Hold the vas_mutex so we don't
 * unmap while the window's debugfs dir is in use. This serializes the
 * close of a window even on another VAS instance, but since it's not a
 * critical path, just minimize the time we hold the mutex for now. We can
 * add a per-instance mutex later if necessary.
 */
static void unmap_winctx_mmio_bars(struct vas_window *window)
{
	int len;
	void *uwc_map;
	void *hvwc_map;
	u64 busaddr_start;

	mutex_lock(&vas_mutex);

	hvwc_map = window->hvwc_map;
	window->hvwc_map = NULL;

	uwc_map = window->uwc_map;
	window->uwc_map = NULL;

	mutex_unlock(&vas_mutex);

	if (hvwc_map) {
		get_hvwc_mmio_bar(window, &busaddr_start, &len);
		unmap_region(hvwc_map, busaddr_start, len);
	}

	if (uwc_map) {
		get_uwc_mmio_bar(window, &busaddr_start, &len);
		unmap_region(uwc_map, busaddr_start, len);
	}
}

/*
 * Find the Hypervisor Window Context (HVWC) MMIO Base Address Region and the
 * OS/User Window Context (UWC) MMIO Base Address Region for the given window.
 * Map these bus addresses and save the mapped kernel addresses in @window.
 */
int map_winctx_mmio_bars(struct vas_window *window)
{
	int len;
	u64 start;

	get_hvwc_mmio_bar(window, &start, &len);
	window->hvwc_map = map_mmio_region("HVWCM_Window", start, len);

	get_uwc_mmio_bar(window, &start, &len);
	window->uwc_map = map_mmio_region("UWCM_Window", start, len);

	if (!window->hvwc_map || !window->uwc_map) {
		unmap_winctx_mmio_bars(window);
		return -1;
	}

	return 0;
}

/*
 * Reset all valid registers in the HV and OS/User Window Contexts for
 * the window identified by @window.
 *
 * NOTE: We cannot really use a for loop to reset window context. Not all
 *	 offsets in a window context are valid registers and the valid
 *	 registers are not sequential. And, we can only write to offsets
 *	 with valid registers.
 */
void reset_window_regs(struct vas_window *window)
{
	write_hvwc_reg(window, VREG(LPID), 0ULL);
	write_hvwc_reg(window, VREG(PID), 0ULL);
	write_hvwc_reg(window, VREG(XLATE_MSR), 0ULL);
	write_hvwc_reg(window, VREG(XLATE_LPCR), 0ULL);
	write_hvwc_reg(window, VREG(XLATE_CTL), 0ULL);
	write_hvwc_reg(window, VREG(AMR), 0ULL);
	write_hvwc_reg(window, VREG(SEIDR), 0ULL);
	write_hvwc_reg(window, VREG(FAULT_TX_WIN), 0ULL);
	write_hvwc_reg(window, VREG(OSU_INTR_SRC_RA), 0ULL);
	write_hvwc_reg(window, VREG(HV_INTR_SRC_RA), 0ULL);
	write_hvwc_reg(window, VREG(PSWID), 0ULL);
	write_hvwc_reg(window, VREG(LFIFO_BAR), 0ULL);
	write_hvwc_reg(window, VREG(LDATA_STAMP_CTL), 0ULL);
	write_hvwc_reg(window, VREG(LDMA_CACHE_CTL), 0ULL);
	write_hvwc_reg(window, VREG(LRFIFO_PUSH), 0ULL);
	write_hvwc_reg(window, VREG(CURR_MSG_COUNT), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_AFTER_COUNT), 0ULL);
	write_hvwc_reg(window, VREG(LRX_WCRED), 0ULL);
	write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
	write_hvwc_reg(window, VREG(TX_WCRED), 0ULL);
	write_hvwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);
	write_hvwc_reg(window, VREG(LFIFO_SIZE), 0ULL);
	write_hvwc_reg(window, VREG(WINCTL), 0ULL);
	write_hvwc_reg(window, VREG(WIN_STATUS), 0ULL);
	write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), 0ULL);
	write_hvwc_reg(window, VREG(TX_RSVD_BUF_COUNT), 0ULL);
	write_hvwc_reg(window, VREG(LRFIFO_WIN_PTR), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_CTL), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_PID), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_LPID), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_TID), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_SCOPE), 0ULL);
	write_hvwc_reg(window, VREG(NX_UTIL_ADDER), 0ULL);

	/* Skip read-only registers: NX_UTIL and NX_UTIL_SE */

	/*
	 * The send and receive window credit adder registers are also
	 * accessible from HVWC and have been initialized above. We don't
	 * need to initialize from the OS/User Window Context, so skip
	 * following calls:
	 *
	 *	write_uwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);
	 *	write_uwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
	 */
}

/*
 * Initialize window context registers related to Address Translation.
 * These registers are common to send/receive windows although they
 * differ for user/kernel windows. As we resolve the TODOs we may
 * want to add fields to vas_winctx and move the initialization to
 * init_vas_winctx_regs().
 */
static void init_xlate_regs(struct vas_window *window, bool user_win)
{
	u64 lpcr, val;

	/*
	 * MSR_TA, MSR_US are false for both kernel and user.
	 * MSR_DR and MSR_PR are false for kernel.
	 */
	val = 0ULL;
	val = SET_FIELD(VAS_XLATE_MSR_HV, val, 1);
	val = SET_FIELD(VAS_XLATE_MSR_SF, val, 1);
	if (user_win) {
		val = SET_FIELD(VAS_XLATE_MSR_DR, val, 1);
		val = SET_FIELD(VAS_XLATE_MSR_PR, val, 1);
	}
	write_hvwc_reg(window, VREG(XLATE_MSR), val);

	lpcr = mfspr(SPRN_LPCR);
	val = 0ULL;
	/*
	 * NOTE: From Section 5.7.8.1 Segment Lookaside Buffer of the
	 *	 Power ISA, v3.0B, Page size encoding is 0 = 4KB, 5 = 64KB.
	 *
	 * NOTE: From Section 1.3.1, Address Translation Context of the
	 *	 Nest MMU Workbook, LPCR_SC should be 0 for Power9.
	 */
	val = SET_FIELD(VAS_XLATE_LPCR_PAGE_SIZE, val, 5);
	val = SET_FIELD(VAS_XLATE_LPCR_ISL, val, lpcr & LPCR_ISL);
	val = SET_FIELD(VAS_XLATE_LPCR_TC, val, lpcr & LPCR_TC);
	val = SET_FIELD(VAS_XLATE_LPCR_SC, val, 0);
	write_hvwc_reg(window, VREG(XLATE_LPCR), val);

	/*
	 * Section 1.3.1 (Address translation Context) of NMMU workbook:
	 *	0b00	Hashed Page Table mode
	 *	0b01	Reserved
	 *	0b10	Radix on HPT
	 *	0b11	Radix on Radix
	 */
	val = 0ULL;
	val = SET_FIELD(VAS_XLATE_MODE, val, radix_enabled() ? 3 : 2);
	write_hvwc_reg(window, VREG(XLATE_CTL), val);

	/*
	 * TODO: Can we mfspr(AMR) even for user windows?
	 */
	val = 0ULL;
	val = SET_FIELD(VAS_AMR, val, mfspr(SPRN_AMR));
	write_hvwc_reg(window, VREG(AMR), val);

	val = 0ULL;
	val = SET_FIELD(VAS_SEIDR, val, 0);
	write_hvwc_reg(window, VREG(SEIDR), val);
}

/*
 * Initialize the Reserved Send Buffer Count for the send window. It involves
 * writing to the register and then reading it back to confirm that the
 * hardware has enough buffers to reserve. See section 1.3.1.2.1 of the VAS
 * workbook.
 *
 * Since we can only make a best-effort attempt to fulfill the request,
 * we don't return any errors if we cannot.
 *
 * TODO: Reserved (aka dedicated) send buffers are not supported yet.
 */
static void init_rsvd_tx_buf_count(struct vas_window *txwin,
				struct vas_winctx *winctx)
{
	write_hvwc_reg(txwin, VREG(TX_RSVD_BUF_COUNT), 0ULL);
}

/*
 * init_winctx_regs()
 *	Initialize the window context registers for a window (send or
 *	receive). Except for caching control and marking the window open,
 *	the registers are initialized in the order listed in Section 3.1.4
 *	(Window Context Cache Register Details) of the VAS workbook,
 *	although they don't need to be.
 *
 * Design note: For NX receive windows, NX allocates the FIFO buffer in OPAL
 *	(so that it can get a large contiguous area) and passes that buffer
 *	to the kernel via the device tree. We now write that buffer address
 *	to the FIFO BAR. Would it make sense to do this all in OPAL? i.e.
 *	have OPAL write the per-chip RX FIFO addresses to the windows during
 *	boot-up as a one-time task? That could work for NX but what about
 *	other receivers? Let the receivers tell us the rx-fifo buffers for
 *	now.
 */
int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx)
{
	u64 val;
	int fifo_size;

	reset_window_regs(window);

	val = 0ULL;
	val = SET_FIELD(VAS_LPID, val, winctx->lpid);
	write_hvwc_reg(window, VREG(LPID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_PID_ID, val, winctx->pidr);
	write_hvwc_reg(window, VREG(PID), val);

	init_xlate_regs(window, winctx->user_win);

	val = 0ULL;
	val = SET_FIELD(VAS_FAULT_TX_WIN, val, 0);
	write_hvwc_reg(window, VREG(FAULT_TX_WIN), val);

	/* In PowerNV, interrupts go to HV. */
	write_hvwc_reg(window, VREG(OSU_INTR_SRC_RA), 0ULL);

	val = 0ULL;
	val = SET_FIELD(VAS_HV_INTR_SRC_RA, val, winctx->irq_port);
	write_hvwc_reg(window, VREG(HV_INTR_SRC_RA), val);

	val = 0ULL;
	val = SET_FIELD(VAS_PSWID_EA_HANDLE, val, winctx->pswid);
	write_hvwc_reg(window, VREG(PSWID), val);

	write_hvwc_reg(window, VREG(SPARE1), 0ULL);
	write_hvwc_reg(window, VREG(SPARE2), 0ULL);
	write_hvwc_reg(window, VREG(SPARE3), 0ULL);

	/*
	 * NOTE: VAS expects the FIFO address to be copied into the LFIFO_BAR
	 *	 register as is - do NOT shift the address into the
	 *	 VAS_LFIFO_BAR bit fields! It is OK to set the page migration
	 *	 select fields - VAS ignores the lower 10+ bits of the address
	 *	 anyway, presumably because the minimum FIFO size is 1KB.
	 *
	 * See also: Design note in function header.
	 */
	val = __pa(winctx->rx_fifo);
	val = SET_FIELD(VAS_PAGE_MIGRATION_SELECT, val, 0);
	write_hvwc_reg(window, VREG(LFIFO_BAR), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LDATA_STAMP, val, winctx->data_stamp);
	write_hvwc_reg(window, VREG(LDATA_STAMP_CTL), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LDMA_TYPE, val, winctx->dma_type);
	val = SET_FIELD(VAS_LDMA_FIFO_DISABLE, val, winctx->fifo_disable);
	write_hvwc_reg(window, VREG(LDMA_CACHE_CTL), val);

	write_hvwc_reg(window, VREG(LRFIFO_PUSH), 0ULL);
	write_hvwc_reg(window, VREG(CURR_MSG_COUNT), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_AFTER_COUNT), 0ULL);

	val = 0ULL;
	val = SET_FIELD(VAS_LRX_WCRED, val, winctx->wcreds_max);
	write_hvwc_reg(window, VREG(LRX_WCRED), val);

	val = 0ULL;
	val = SET_FIELD(VAS_TX_WCRED, val, winctx->wcreds_max);
	write_hvwc_reg(window, VREG(TX_WCRED), val);

	write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
	write_hvwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);

	fifo_size = winctx->rx_fifo_size / 1024;

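	/*
	 * Illustrative example of the encoding computed below: a 32KB
	 * receive FIFO gives fifo_size = 32, so ilog2(32) = 5 is the value
	 * written to LFIFO_SIZE.
	 */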
	val = 0ULL;
	val = SET_FIELD(VAS_LFIFO_SIZE, val, ilog2(fifo_size));
	write_hvwc_reg(window, VREG(LFIFO_SIZE), val);

	/*
	 * Update window control and caching control registers last so
	 * we mark the window open only after fully initializing it and
	 * pushing context to cache.
	 */

	write_hvwc_reg(window, VREG(WIN_STATUS), 0ULL);

	init_rsvd_tx_buf_count(window, winctx);

	/* for a send window, point to the matching receive window */
	val = 0ULL;
	val = SET_FIELD(VAS_LRX_WIN_ID, val, winctx->rx_win_id);
	write_hvwc_reg(window, VREG(LRFIFO_WIN_PTR), val);

	write_hvwc_reg(window, VREG(SPARE4), 0ULL);

	val = 0ULL;
	val = SET_FIELD(VAS_NOTIFY_DISABLE, val, winctx->notify_disable);
	val = SET_FIELD(VAS_INTR_DISABLE, val, winctx->intr_disable);
	val = SET_FIELD(VAS_NOTIFY_EARLY, val, winctx->notify_early);
	val = SET_FIELD(VAS_NOTIFY_OSU_INTR, val, winctx->notify_os_intr_reg);
	write_hvwc_reg(window, VREG(LNOTIFY_CTL), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_PID, val, winctx->lnotify_pid);
	write_hvwc_reg(window, VREG(LNOTIFY_PID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_LPID, val, winctx->lnotify_lpid);
	write_hvwc_reg(window, VREG(LNOTIFY_LPID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_TID, val, winctx->lnotify_tid);
	write_hvwc_reg(window, VREG(LNOTIFY_TID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_MIN_SCOPE, val, winctx->min_scope);
	val = SET_FIELD(VAS_LNOTIFY_MAX_SCOPE, val, winctx->max_scope);
	write_hvwc_reg(window, VREG(LNOTIFY_SCOPE), val);

	/* Skip read-only registers NX_UTIL and NX_UTIL_SE */

	write_hvwc_reg(window, VREG(SPARE5), 0ULL);
	write_hvwc_reg(window, VREG(NX_UTIL_ADDER), 0ULL);
	write_hvwc_reg(window, VREG(SPARE6), 0ULL);

	/* Finally, push window context to memory and... */
	val = 0ULL;
	val = SET_FIELD(VAS_PUSH_TO_MEM, val, 1);
	write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), val);

	/* ... mark the window open for business */
	val = 0ULL;
	val = SET_FIELD(VAS_WINCTL_REJ_NO_CREDIT, val, winctx->rej_no_credit);
	val = SET_FIELD(VAS_WINCTL_PIN, val, winctx->pin_win);
	val = SET_FIELD(VAS_WINCTL_TX_WCRED_MODE, val, winctx->tx_wcred_mode);
	val = SET_FIELD(VAS_WINCTL_RX_WCRED_MODE, val, winctx->rx_wcred_mode);
	val = SET_FIELD(VAS_WINCTL_TX_WORD_MODE, val, winctx->tx_word_mode);
	val = SET_FIELD(VAS_WINCTL_RX_WORD_MODE, val, winctx->rx_word_mode);
	val = SET_FIELD(VAS_WINCTL_FAULT_WIN, val, winctx->fault_win);
	val = SET_FIELD(VAS_WINCTL_NX_WIN, val, winctx->nx_win);
	val = SET_FIELD(VAS_WINCTL_OPEN, val, 1);
	write_hvwc_reg(window, VREG(WINCTL), val);

	return 0;
}

static DEFINE_SPINLOCK(vas_ida_lock);

static void vas_release_window_id(struct ida *ida, int winid)
{
	spin_lock(&vas_ida_lock);
	ida_remove(ida, winid);
	spin_unlock(&vas_ida_lock);
}

static int vas_assign_window_id(struct ida *ida)
{
	int rc, winid;

	do {
		rc = ida_pre_get(ida, GFP_KERNEL);
		if (!rc)
			return -EAGAIN;

		spin_lock(&vas_ida_lock);
		rc = ida_get_new(ida, &winid);
		spin_unlock(&vas_ida_lock);
	} while (rc == -EAGAIN);

	if (rc)
		return rc;

	if (winid >= VAS_WINDOWS_PER_CHIP) {
		pr_err("Too many (%d) open windows\n", winid);
		vas_release_window_id(ida, winid);
		return -EAGAIN;
	}

	return winid;
}
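
/*
 * Note: the two helpers above use the older ida_pre_get()/ida_get_new()/
 * ida_remove() API, hence the preallocation retry loop and the external
 * vas_ida_lock. On kernels that provide ida_alloc_max()/ida_free() the
 * same id management could collapse to roughly the following sketch
 * (illustrative only, not applicable to this tree as-is):
 *
 *	winid = ida_alloc_max(ida, VAS_WINDOWS_PER_CHIP - 1, GFP_KERNEL);
 *	if (winid < 0)
 *		return winid;
 *	...
 *	ida_free(ida, winid);
 */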

static void vas_window_free(struct vas_window *window)
{
	int winid = window->winid;
	struct vas_instance *vinst = window->vinst;

	unmap_winctx_mmio_bars(window);

	vas_window_free_dbgdir(window);

	kfree(window);

	vas_release_window_id(&vinst->ida, winid);
}

static struct vas_window *vas_window_alloc(struct vas_instance *vinst)
{
	int winid;
	struct vas_window *window;

	winid = vas_assign_window_id(&vinst->ida);
	if (winid < 0)
		return ERR_PTR(winid);

	window = kzalloc(sizeof(*window), GFP_KERNEL);
	if (!window)
		goto out_free;

	window->vinst = vinst;
	window->winid = winid;

	if (map_winctx_mmio_bars(window))
		goto out_free;

	vas_window_init_dbgdir(window);

	return window;

out_free:
	kfree(window);
	vas_release_window_id(&vinst->ida, winid);
	return ERR_PTR(-ENOMEM);
}

static void put_rx_win(struct vas_window *rxwin)
{
	/* Better not be a send window! */
	WARN_ON_ONCE(rxwin->tx_win);

	atomic_dec(&rxwin->num_txwins);
}

/*
 * Find the user space receive window given the @pswid.
 *	- We must have a valid vasid and it must belong to this instance.
 *	  (so both send and receive windows are on the same VAS instance)
 *	- The window must refer to an OPEN, FTW, RECEIVE window.
 *
 * NOTE: We access ->windows[] table and assume that vinst->mutex is held.
 */
static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
{
	int vasid, winid;
	struct vas_window *rxwin;

	decode_pswid(pswid, &vasid, &winid);

	if (vinst->vas_id != vasid)
		return ERR_PTR(-EINVAL);

	rxwin = vinst->windows[winid];

	if (!rxwin || rxwin->tx_win || rxwin->cop != VAS_COP_TYPE_FTW)
		return ERR_PTR(-EINVAL);

	return rxwin;
}

/*
 * Get the VAS receive window associated with the NX engine identified
 * by @cop and, if applicable, @pswid.
 *
 * See also function header of set_vinst_win().
 */
static struct vas_window *get_vinst_rxwin(struct vas_instance *vinst,
			enum vas_cop_type cop, u32 pswid)
{
	struct vas_window *rxwin;

	mutex_lock(&vinst->mutex);

	if (cop == VAS_COP_TYPE_FTW)
		rxwin = get_user_rxwin(vinst, pswid);
	else
		rxwin = vinst->rxwin[cop] ?: ERR_PTR(-EINVAL);

	if (!IS_ERR(rxwin))
		atomic_inc(&rxwin->num_txwins);

	mutex_unlock(&vinst->mutex);

	return rxwin;
}

/*
 * We have two tables of windows in a VAS instance. The first one,
 * ->windows[], contains all the windows in the instance and allows
 * looking up a window by its id. It is used to look up send windows
 * during fault handling and receive windows when pairing user space
 * send/receive windows.
 *
 * The second table, ->rxwin[], contains receive windows that are
 * associated with NX engines. This table has VAS_COP_TYPE_MAX
 * entries and is used to look up a receive window by its
 * coprocessor type.
 *
 * Here, we save @window in the ->windows[] table. If it is a receive
 * window, we also save the window in the ->rxwin[] table.
 */
static void set_vinst_win(struct vas_instance *vinst,
			struct vas_window *window)
{
	int id = window->winid;

	mutex_lock(&vinst->mutex);

	/*
	 * There should only be one receive window for a coprocessor type
	 * unless it's a user (FTW) window.
	 */
	if (!window->user_win && !window->tx_win) {
		WARN_ON_ONCE(vinst->rxwin[window->cop]);
		vinst->rxwin[window->cop] = window;
	}

	WARN_ON_ONCE(vinst->windows[id] != NULL);
	vinst->windows[id] = window;

	mutex_unlock(&vinst->mutex);
}

/*
 * Clear this window from the table(s) of windows for this VAS instance.
 * See also function header of set_vinst_win().
 */
static void clear_vinst_win(struct vas_window *window)
{
	int id = window->winid;
	struct vas_instance *vinst = window->vinst;

	mutex_lock(&vinst->mutex);

	if (!window->user_win && !window->tx_win) {
		WARN_ON_ONCE(!vinst->rxwin[window->cop]);
		vinst->rxwin[window->cop] = NULL;
	}

	WARN_ON_ONCE(vinst->windows[id] != window);
	vinst->windows[id] = NULL;

	mutex_unlock(&vinst->mutex);
}

static void init_winctx_for_rxwin(struct vas_window *rxwin,
			struct vas_rx_win_attr *rxattr,
			struct vas_winctx *winctx)
{
	/*
	 * We first zero (memset()) all fields and only set non-zero fields.
	 * The following fields are 0/false but may deserve a comment:
	 *
	 *	->notify_os_intr_reg	In powerNV, send intrs to HV
	 *	->notify_disable	False for NX windows
	 *	->intr_disable		False for Fault Windows
	 *	->xtra_write		False for NX windows
	 *	->notify_early		NA for NX windows
	 *	->rsvd_txbuf_count	NA for Rx windows
	 *	->lpid, ->pid, ->tid	NA for Rx windows
	 */

	memset(winctx, 0, sizeof(struct vas_winctx));

	winctx->rx_fifo = rxattr->rx_fifo;
	winctx->rx_fifo_size = rxattr->rx_fifo_size;
	winctx->wcreds_max = rxwin->wcreds_max;
	winctx->pin_win = rxattr->pin_win;

	winctx->nx_win = rxattr->nx_win;
	winctx->fault_win = rxattr->fault_win;
	winctx->user_win = rxattr->user_win;
	winctx->rej_no_credit = rxattr->rej_no_credit;
	winctx->rx_word_mode = rxattr->rx_win_ord_mode;
	winctx->tx_word_mode = rxattr->tx_win_ord_mode;
	winctx->rx_wcred_mode = rxattr->rx_wcred_mode;
	winctx->tx_wcred_mode = rxattr->tx_wcred_mode;
	winctx->notify_early = rxattr->notify_early;

	if (winctx->nx_win) {
		winctx->data_stamp = true;
		winctx->intr_disable = true;
		winctx->pin_win = true;

		WARN_ON_ONCE(winctx->fault_win);
		WARN_ON_ONCE(!winctx->rx_word_mode);
		WARN_ON_ONCE(!winctx->tx_word_mode);
		WARN_ON_ONCE(winctx->notify_after_count);
	} else if (winctx->fault_win) {
		winctx->notify_disable = true;
	} else if (winctx->user_win) {
		/*
		 * Section 1.8.1 Low Latency Core-Core Wake up of
		 * the VAS workbook:
		 *
		 *	- disable credit checks ([tr]x_wcred_mode = false)
		 *	- disable FIFO writes
		 *	- enable ASB_Notify, disable interrupt
		 */
		winctx->fifo_disable = true;
		winctx->intr_disable = true;
		winctx->rx_fifo = NULL;
	}

	winctx->lnotify_lpid = rxattr->lnotify_lpid;
	winctx->lnotify_pid = rxattr->lnotify_pid;
	winctx->lnotify_tid = rxattr->lnotify_tid;
	winctx->pswid = rxattr->pswid;
	winctx->dma_type = VAS_DMA_TYPE_INJECT;
	winctx->tc_mode = rxattr->tc_mode;

	winctx->min_scope = VAS_SCOPE_LOCAL;
	winctx->max_scope = VAS_SCOPE_VECTORED_GROUP;
}

static bool rx_win_args_valid(enum vas_cop_type cop,
			struct vas_rx_win_attr *attr)
{
	pr_debug("Rxattr: fault %d, notify %d, intr %d, early %d, fifo %d\n",
			attr->fault_win, attr->notify_disable,
			attr->intr_disable, attr->notify_early,
			attr->rx_fifo_size);

	if (cop >= VAS_COP_TYPE_MAX)
		return false;

	if (cop != VAS_COP_TYPE_FTW &&
			attr->rx_fifo_size < VAS_RX_FIFO_SIZE_MIN)
		return false;

	if (attr->rx_fifo_size > VAS_RX_FIFO_SIZE_MAX)
		return false;

	if (attr->wcreds_max > VAS_RX_WCREDS_MAX)
		return false;

	if (attr->nx_win) {
		/* cannot be fault or user window if it is nx */
		if (attr->fault_win || attr->user_win)
			return false;
		/*
		 * Section 3.1.4.32: NX Windows must not disable notification,
		 * and must not enable interrupts or early notification.
		 */
		if (attr->notify_disable || !attr->intr_disable ||
				attr->notify_early)
			return false;
	} else if (attr->fault_win) {
		/* cannot be both fault and user window */
		if (attr->user_win)
			return false;

		/*
		 * Section 3.1.4.32: Fault windows must disable notification
		 * but not interrupts.
		 */
		if (!attr->notify_disable || attr->intr_disable)
			return false;

	} else if (attr->user_win) {
		/*
		 * User receive windows are only for fast-thread-wakeup
		 * (FTW). They don't need a FIFO and must disable interrupts.
		 */
		if (attr->rx_fifo || attr->rx_fifo_size || !attr->intr_disable)
			return false;
	} else {
		/* Rx window must be one of NX or Fault or User window. */
		return false;
	}

	return true;
}

void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop)
{
	memset(rxattr, 0, sizeof(*rxattr));

	if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI) {
		rxattr->pin_win = true;
		rxattr->nx_win = true;
		rxattr->fault_win = false;
		rxattr->intr_disable = true;
		rxattr->rx_wcred_mode = true;
		rxattr->tx_wcred_mode = true;
		rxattr->rx_win_ord_mode = true;
		rxattr->tx_win_ord_mode = true;
	} else if (cop == VAS_COP_TYPE_FAULT) {
		rxattr->pin_win = true;
		rxattr->fault_win = true;
		rxattr->notify_disable = true;
		rxattr->rx_wcred_mode = true;
		rxattr->tx_wcred_mode = true;
		rxattr->rx_win_ord_mode = true;
		rxattr->tx_win_ord_mode = true;
	} else if (cop == VAS_COP_TYPE_FTW) {
		rxattr->user_win = true;
		rxattr->intr_disable = true;

		/*
		 * As noted in the VAS Workbook we disable credit checks.
		 * If we enable credit checks in the future, we must also
		 * implement a mechanism to return the user credits or new
		 * paste operations will fail.
		 */
	}
}
EXPORT_SYMBOL_GPL(vas_init_rx_win_attr);

struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
			struct vas_rx_win_attr *rxattr)
{
	struct vas_window *rxwin;
	struct vas_winctx winctx;
	struct vas_instance *vinst;

	if (!rx_win_args_valid(cop, rxattr))
		return ERR_PTR(-EINVAL);

	vinst = find_vas_instance(vasid);
	if (!vinst) {
		pr_devel("vasid %d not found!\n", vasid);
		return ERR_PTR(-EINVAL);
	}
	pr_devel("Found instance %d\n", vasid);

	rxwin = vas_window_alloc(vinst);
	if (IS_ERR(rxwin)) {
		pr_devel("Unable to allocate memory for Rx window\n");
		return rxwin;
	}

	rxwin->tx_win = false;
	rxwin->nx_win = rxattr->nx_win;
	rxwin->user_win = rxattr->user_win;
	rxwin->cop = cop;
	rxwin->wcreds_max = rxattr->wcreds_max ?: VAS_WCREDS_DEFAULT;
	if (rxattr->user_win)
		rxwin->pid = task_pid_vnr(current);

	init_winctx_for_rxwin(rxwin, rxattr, &winctx);
	init_winctx_regs(rxwin, &winctx);

	set_vinst_win(vinst, rxwin);

	return rxwin;
}
EXPORT_SYMBOL_GPL(vas_rx_win_open);
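
/*
 * Hedged usage sketch (illustrative, not taken from this file): a
 * receive-side kernel driver, e.g. an NX-842 style coprocessor driver,
 * would typically pair vas_init_rx_win_attr() with vas_rx_win_open()
 * roughly as follows. The vasid, FIFO size and lnotify values below are
 * placeholders, not real configuration.
 *
 *	struct vas_rx_win_attr rxattr;
 *	struct vas_window *rxwin;
 *	void *fifo = <coprocessor's receive FIFO buffer>;
 *
 *	vas_init_rx_win_attr(&rxattr, VAS_COP_TYPE_842);
 *	rxattr.rx_fifo = fifo;
 *	rxattr.rx_fifo_size = 32768;
 *	rxattr.lnotify_lpid = <partition id>;
 *	rxattr.lnotify_pid = <process id>;
 *	rxattr.lnotify_tid = <thread id>;
 *
 *	rxwin = vas_rx_win_open(vasid, VAS_COP_TYPE_842, &rxattr);
 *	if (IS_ERR(rxwin))
 *		return PTR_ERR(rxwin);
 */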

void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, enum vas_cop_type cop)
{
	memset(txattr, 0, sizeof(*txattr));

	if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI) {
		txattr->rej_no_credit = false;
		txattr->rx_wcred_mode = true;
		txattr->tx_wcred_mode = true;
		txattr->rx_win_ord_mode = true;
		txattr->tx_win_ord_mode = true;
	} else if (cop == VAS_COP_TYPE_FTW) {
		txattr->user_win = true;
	}
}
EXPORT_SYMBOL_GPL(vas_init_tx_win_attr);

static void init_winctx_for_txwin(struct vas_window *txwin,
			struct vas_tx_win_attr *txattr,
			struct vas_winctx *winctx)
{
	/*
	 * We first zero all fields and only set non-zero ones. Following
	 * are some fields set to 0/false for the stated reason:
	 *
	 *	->notify_os_intr_reg	In powernv, send intrs to HV
	 *	->rsvd_txbuf_count	Not supported yet.
	 *	->notify_disable	False for NX windows
	 *	->xtra_write		False for NX windows
	 *	->notify_early		NA for NX windows
	 *	->lnotify_lpid		NA for Tx windows
	 *	->lnotify_pid		NA for Tx windows
	 *	->lnotify_tid		NA for Tx windows
	 *	->tx_win_cred_mode	Ignore for now for NX windows
	 *	->rx_win_cred_mode	Ignore for now for NX windows
	 */
	memset(winctx, 0, sizeof(struct vas_winctx));

	winctx->wcreds_max = txwin->wcreds_max;

	winctx->user_win = txattr->user_win;
	winctx->nx_win = txwin->rxwin->nx_win;
	winctx->pin_win = txattr->pin_win;
	winctx->rej_no_credit = txattr->rej_no_credit;
	winctx->rsvd_txbuf_enable = txattr->rsvd_txbuf_enable;

	winctx->rx_wcred_mode = txattr->rx_wcred_mode;
	winctx->tx_wcred_mode = txattr->tx_wcred_mode;
	winctx->rx_word_mode = txattr->rx_win_ord_mode;
	winctx->tx_word_mode = txattr->tx_win_ord_mode;
	winctx->rsvd_txbuf_count = txattr->rsvd_txbuf_count;

	winctx->intr_disable = true;
	if (winctx->nx_win)
		winctx->data_stamp = true;

	winctx->lpid = txattr->lpid;
	winctx->pidr = txattr->pidr;
	winctx->rx_win_id = txwin->rxwin->winid;

	winctx->dma_type = VAS_DMA_TYPE_INJECT;
	winctx->tc_mode = txattr->tc_mode;
	winctx->min_scope = VAS_SCOPE_LOCAL;
	winctx->max_scope = VAS_SCOPE_VECTORED_GROUP;

	winctx->pswid = 0;
}

static bool tx_win_args_valid(enum vas_cop_type cop,
			struct vas_tx_win_attr *attr)
{
	if (attr->tc_mode != VAS_THRESH_DISABLED)
		return false;

	if (cop >= VAS_COP_TYPE_MAX)
		return false;

	if (attr->wcreds_max > VAS_TX_WCREDS_MAX)
		return false;

	if (attr->user_win &&
			(cop != VAS_COP_TYPE_FTW || attr->rsvd_txbuf_count))
		return false;

	return true;
}

struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
			struct vas_tx_win_attr *attr)
{
	int rc;
	struct vas_window *txwin;
	struct vas_window *rxwin;
	struct vas_winctx winctx;
	struct vas_instance *vinst;

	if (!tx_win_args_valid(cop, attr))
		return ERR_PTR(-EINVAL);

	/*
	 * If the caller did not specify a vasid but specified the PSWID of a
	 * receive window (applicable only to FTW windows), use the vasid
	 * from that receive window.
	 */
	if (vasid == -1 && attr->pswid)
		decode_pswid(attr->pswid, &vasid, NULL);

	vinst = find_vas_instance(vasid);
	if (!vinst) {
		pr_devel("vasid %d not found!\n", vasid);
		return ERR_PTR(-EINVAL);
	}

	rxwin = get_vinst_rxwin(vinst, cop, attr->pswid);
	if (IS_ERR(rxwin)) {
		pr_devel("No RxWin for vasid %d, cop %d\n", vasid, cop);
		return rxwin;
	}

	txwin = vas_window_alloc(vinst);
	if (IS_ERR(txwin)) {
		rc = PTR_ERR(txwin);
		goto put_rxwin;
	}

	txwin->cop = cop;
	txwin->tx_win = 1;
	txwin->rxwin = rxwin;
	txwin->nx_win = txwin->rxwin->nx_win;
	txwin->pid = attr->pid;
	txwin->user_win = attr->user_win;
	txwin->wcreds_max = attr->wcreds_max ?: VAS_WCREDS_DEFAULT;

	init_winctx_for_txwin(txwin, attr, &winctx);

	init_winctx_regs(txwin, &winctx);

	/*
	 * If it's a kernel send window, map the window address into the
	 * kernel's address space. For user windows, the user must issue an
	 * mmap() to map the window into their address space.
	 *
	 * NOTE: If the kernel ever resubmits a user CRB after handling a
	 *	 page fault, we will need to map this into the kernel as
	 *	 well.
	 */
	if (!txwin->user_win) {
		txwin->paste_kaddr = map_paste_region(txwin);
		if (IS_ERR(txwin->paste_kaddr)) {
			rc = PTR_ERR(txwin->paste_kaddr);
			goto free_window;
		}
	}

	/*
	 * Now that we have a send window, ensure context switch issues
	 * CP_ABORT for this thread.
	 */
	rc = -EINVAL;
	if (set_thread_uses_vas() < 0)
		goto free_window;

	set_vinst_win(vinst, txwin);

	return txwin;

free_window:
	vas_window_free(txwin);

put_rxwin:
	put_rx_win(rxwin);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(vas_tx_win_open);
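
/*
 * Hedged usage sketch (illustrative, not taken from this file): a kernel
 * user of the send-side API, assuming a matching receive window has
 * already been opened for the engine. The vasid, CRB buffer and SPR
 * choices are placeholders.
 *
 *	struct vas_tx_win_attr txattr;
 *	struct vas_window *txwin;
 *	int rc;
 *
 *	vas_init_tx_win_attr(&txattr, VAS_COP_TYPE_842);
 *	txattr.lpid = mfspr(SPRN_LPID);
 *	txattr.pidr = mfspr(SPRN_PID);
 *
 *	txwin = vas_tx_win_open(vasid, VAS_COP_TYPE_842, &txattr);
 *	if (IS_ERR(txwin))
 *		return PTR_ERR(txwin);
 *
 *	vas_copy_crb(crb, 0);			copy the CRB
 *	rc = vas_paste_crb(txwin, 0, true);	paste it to the send window
 *	...
 *	vas_win_close(txwin);
 */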

int vas_copy_crb(void *crb, int offset)
{
	return vas_copy(crb, offset);
}
EXPORT_SYMBOL_GPL(vas_copy_crb);

#define RMA_LSMP_REPORT_ENABLE PPC_BIT(53)
int vas_paste_crb(struct vas_window *txwin, int offset, bool re)
{
	int rc;
	void *addr;
	uint64_t val;

	/*
	 * Only NX windows are supported for now and the hardware assumes
	 * the report-enable flag is set for NX windows. Ensure software
	 * complies too.
	 */
	WARN_ON_ONCE(txwin->nx_win && !re);

	addr = txwin->paste_kaddr;
	if (re) {
		/*
		 * Set the REPORT_ENABLE bit (equivalent to writing to the
		 * 1K offset of the paste address): PPC_BIT(53) is
		 * 1UL << (63 - 53) == 0x400 == 1024.
		 */
		val = SET_FIELD(RMA_LSMP_REPORT_ENABLE, 0ULL, 1);
		addr += val;
	}

	/*
	 * Map the raw CR value from vas_paste() to an error code (there
	 * is just pass or fail for now though).
	 */
	rc = vas_paste(addr, offset);
	if (rc == 2)
		rc = 0;
	else
		rc = -EINVAL;

	pr_debug("Txwin #%d: Msg count %llu\n", txwin->winid,
			read_hvwc_reg(txwin, VREG(LRFIFO_PUSH)));

	return rc;
}
EXPORT_SYMBOL_GPL(vas_paste_crb);

/*
 * If credit checking is enabled for this window, poll for the return
 * of window credits (i.e. for NX engines to process any outstanding CRBs).
 * Since NX-842 waits for the CRBs to be processed before closing the
 * window, we should not have to wait for too long.
 *
 * TODO: We retry in 10ms intervals now. We could/should probably peek at
 *	the VAS_LRFIFO_PUSH_OFFSET register to get an estimate of pending
 *	CRBs on the FIFO and compute the delay dynamically on each retry.
 *	But that is not really needed until we support NX-GZIP access from
 *	user space. (The NX-842 driver waits for the CSB, and Fast
 *	thread-wakeup doesn't use credit checking.)
 */
static void poll_window_credits(struct vas_window *window)
{
	u64 val;
	int creds, mode;

	val = read_hvwc_reg(window, VREG(WINCTL));
	if (window->tx_win)
		mode = GET_FIELD(VAS_WINCTL_TX_WCRED_MODE, val);
	else
		mode = GET_FIELD(VAS_WINCTL_RX_WCRED_MODE, val);

	if (!mode)
		return;
retry:
	if (window->tx_win) {
		val = read_hvwc_reg(window, VREG(TX_WCRED));
		creds = GET_FIELD(VAS_TX_WCRED, val);
	} else {
		val = read_hvwc_reg(window, VREG(LRX_WCRED));
		creds = GET_FIELD(VAS_LRX_WCRED, val);
	}

	if (creds < window->wcreds_max) {
		val = 0;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(10));
		goto retry;
	}
}

/*
 * Wait for the window to go to "not-busy" state. It should only take a
 * short time to queue a CRB, so window should not be busy for too long.
 * Trying 5ms intervals.
 */
static void poll_window_busy_state(struct vas_window *window)
{
	int busy;
	u64 val;

retry:
	val = read_hvwc_reg(window, VREG(WIN_STATUS));
	busy = GET_FIELD(VAS_WIN_BUSY, val);
	if (busy) {
		val = 0;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(5));
		goto retry;
	}
}

/*
 * Have the hardware cast a window out of cache and wait for it to
 * be completed.
 *
 * NOTE: It can take a relatively long time to cast the window context
 *	out of the cache. It is not strictly necessary to cast out if:
 *
 *	- we clear the "Pin Window" bit (so hardware is free to evict)
 *
 *	- we re-initialize the window context when it is reassigned.
 *
 *	We do the former in vas_win_close() and the latter in the
 *	vas_*_win_open() paths. So, ignoring the cast-out for now. We can
 *	add it as needed. If casting out becomes necessary we should
 *	consider offloading the job to a worker thread, so the window
 *	close can proceed quickly.
 */
static void poll_window_castout(struct vas_window *window)
{
	/* stub for now */
}

/*
 * Unpin and close a window so no new requests are accepted and the
 * hardware can evict this window from cache if necessary.
 */
static void unpin_close_window(struct vas_window *window)
{
	u64 val;

	val = read_hvwc_reg(window, VREG(WINCTL));
	val = SET_FIELD(VAS_WINCTL_PIN, val, 0);
	val = SET_FIELD(VAS_WINCTL_OPEN, val, 0);
	write_hvwc_reg(window, VREG(WINCTL), val);
}

/*
 * Close a window.
 *
 * See Section 1.12.1 of VAS workbook v1.05 for details on closing a window:
 *	- Disable new paste operations (unmap paste address)
 *	- Poll for the "Window Busy" bit to be cleared
 *	- Clear the Open/Enable bit for the Window.
 *	- Poll for return of window Credits (implies FIFO empty for Rx win?)
 *	- Unpin and cast window context out of cache
 *
 * Besides the hardware, the kernel has some bookkeeping of course.
 */
int vas_win_close(struct vas_window *window)
{
	if (!window)
		return 0;

	if (!window->tx_win && atomic_read(&window->num_txwins) != 0) {
		pr_devel("Attempting to close an active Rx window!\n");
		WARN_ON_ONCE(1);
		return -EBUSY;
	}

	unmap_paste_region(window);

	clear_vinst_win(window);

	poll_window_busy_state(window);

	unpin_close_window(window);

	poll_window_credits(window);

	poll_window_castout(window);

	/* if send window, drop reference to matching receive window */
	if (window->tx_win)
		put_rx_win(window->rxwin);

	vas_window_free(window);

	return 0;
}
EXPORT_SYMBOL_GPL(vas_win_close);

/*
 * Return a system-wide unique window id for the window @win.
 */
u32 vas_win_id(struct vas_window *win)
{
	return encode_pswid(win->vinst->vas_id, win->winid);
}
EXPORT_SYMBOL_GPL(vas_win_id);