/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines for PCI DMA mapping. See Documentation/DMA-API.txt for
 * a description of how these routines should be used.
 */

#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>

#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))

/**
 * sn_dma_supported - test a DMA mask
 * @dev: device to test
 * @mask: DMA mask to test
 *
 * Return whether the given PCI device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function. Of course, SN only supports devices that have 32 or more
 * address bits when using the PMU.
 */
static int sn_dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (mask < 0x7fffffff)
		return 0;
	return 1;
}
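
/*
 * Illustrative values (derived from the check above, not from the
 * original sources): a 24-bit mask fails while a full 32-bit mask
 * succeeds:
 *
 *	sn_dma_supported(dev, 0x00ffffff)        -> 0 (rejected)
 *	sn_dma_supported(dev, DMA_BIT_MASK(32))  -> 1 (supported)
 */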

/**
 * sn_dma_set_mask - set the DMA mask
 * @dev: device to set
 * @dma_mask: new mask
 *
 * Set @dev's DMA mask if the hw supports it.
 */
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (!sn_dma_supported(dev, dma_mask))
		return 0;

	*dev->dma_mask = dma_mask;
	return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);
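
/*
 * Usage sketch (illustrative only, not from this driver): a device
 * driver would typically negotiate the widest mask it supports, e.g.
 *
 *	if (!sn_dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *		return -EIO;
 *
 * and fall back to a narrower mask only if that fails; masks smaller
 * than 0x7fffffff are always rejected (see sn_dma_supported() above).
 */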

/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 * @attrs: optional dma attributes
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device. On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller). See Documentation/DMA-API.txt for
 * more information.
 */
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t flags,
				   struct dma_attrs *attrs)
{
	void *cpuaddr;
	unsigned long phys_addr;
	int node;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Allocate the memory.
	 */
	node = pcibus_to_node(pdev->bus);
	if (likely(node >= 0)) {
		struct page *p = alloc_pages_exact_node(node,
							flags, get_order(size));

		if (likely(p))
			cpuaddr = page_address(p);
		else
			return NULL;
	} else
		cpuaddr = (void *)__get_free_pages(flags, get_order(size));

	if (unlikely(!cpuaddr))
		return NULL;

	memset(cpuaddr, 0x0, size);

	/* physical addr. of the memory we just got */
	phys_addr = __pa(cpuaddr);

	/*
	 * 64 bit address translations should never fail.
	 * 32 bit translations can fail if there are insufficient mapping
	 * resources.
	 */

	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
						   SN_DMA_ADDR_PHYS);
	if (!*dma_handle) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		free_pages((unsigned long)cpuaddr, get_order(size));
		return NULL;
	}

	return cpuaddr;
}
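
/*
 * Usage sketch (hypothetical driver code; "ring_size" and "ring_dma"
 * are illustrative names): a command ring would be allocated and later
 * released through the generic API, which lands in
 * sn_dma_alloc_coherent()/sn_dma_free_coherent() via sn_dma_ops:
 *
 *	void *ring = dma_alloc_coherent(&pdev->dev, ring_size,
 *					&ring_dma, GFP_KERNEL);
 *	...
 *	dma_free_coherent(&pdev->dev, ring_size, ring, ring_dma);
 */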

/**
 * sn_dma_free_coherent - free memory associated with coherent DMAable region
 * @dev: device to free for
 * @size: size to free
 * @cpu_addr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 * @attrs: optional dma attributes
 *
 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
 * any associated IOMMU mappings.
 */
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
				 dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_handle, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}

/**
 * sn_dma_map_page - map a single page for DMA
 * @dev: device to map for
 * @page: page to map
 * @offset: offset into @page of the region to map
 * @size: size of the region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Map the region starting at @offset within @page for DMA and return the
 * DMA address.
 *
 * We map this to the one step pcibr_dmamap_trans interface rather than
 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
 * no way of saving the dmamap handle from the alloc to later free
 * (which is pretty much unacceptable).
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute get mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * TODO: simplify our interface;
 *       figure out how to save dmamap handle so can use two step.
 */
static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	void *cpu_addr = page_address(page) + offset;
	dma_addr_t dma_addr;
	unsigned long phys_addr;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int dmabarr;

	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

	BUG_ON(dev->bus != &pci_bus_type);

	phys_addr = __pa(cpu_addr);
	if (dmabarr)
		dma_addr = provider->dma_map_consistent(pdev, phys_addr,
							size, SN_DMA_ADDR_PHYS);
	else
		dma_addr = provider->dma_map(pdev, phys_addr, size,
					     SN_DMA_ADDR_PHYS);

	if (!dma_addr) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		return 0;
	}
	return dma_addr;
}
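
/*
 * Usage sketch (hypothetical): a driver maps a buffer for a single
 * streaming transfer through the generic API, which dispatches here;
 * "page" and "len" are illustrative names:
 *
 *	dma_addr_t busaddr = dma_map_page(&pdev->dev, page, 0, len,
 *					  DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, busaddr))
 *		goto err;
 */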

/**
 * sn_dma_unmap_page - unmap a DMA-mapped page
 * @dev: device to sync
 * @dma_addr: DMA address to sync
 * @size: size of region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * This routine is supposed to sync the DMA region specified
 * by @dma_addr into the coherence domain. On SN, we're always cache
 * coherent, so we just need to free any ATEs associated with this mapping.
 */
static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
			      size_t size, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_addr, dir);
}

/**
 * sn_dma_unmap_sg - unmap a DMA scatterlist
 * @dev: device to unmap
 * @sgl: scatterlist to unmap
 * @nhwentries: number of scatterlist entries
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Unmap a set of streaming mode DMA translations.
 */
static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
			    int nhwentries, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	struct scatterlist *sg;

	BUG_ON(dev->bus != &pci_bus_type);

	for_each_sg(sgl, sg, nhwentries, i) {
		provider->dma_unmap(pdev, sg->dma_address, dir);
		sg->dma_address = (dma_addr_t) NULL;
		sg->dma_length = 0;
	}
}

/**
 * sn_dma_map_sg - map a scatterlist for DMA
 * @dev: device to map for
 * @sgl: scatterlist to map
 * @nhwentries: number of entries
 * @dir: direction of the DMA transaction
 * @attrs: optional dma attributes
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute get mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * Maps each entry of @sgl for DMA.
 */
static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
			 int nhwentries, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	unsigned long phys_addr;
	struct scatterlist *saved_sg = sgl, *sg;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int i;
	int dmabarr;

	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Setup a DMA address for each entry in the scatterlist.
	 */
	for_each_sg(sgl, sg, nhwentries, i) {
		dma_addr_t dma_addr;
		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
		if (dmabarr)
			dma_addr = provider->dma_map_consistent(pdev,
								phys_addr,
								sg->length,
								SN_DMA_ADDR_PHYS);
		else
			dma_addr = provider->dma_map(pdev, phys_addr,
						     sg->length,
						     SN_DMA_ADDR_PHYS);

		sg->dma_address = dma_addr;
		if (!sg->dma_address) {
			printk(KERN_ERR "%s: out of ATEs\n", __func__);

			/*
			 * Free any successfully allocated entries.
			 */
			if (i > 0)
				sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
			return 0;
		}

		sg->dma_length = sg->length;
	}

	return nhwentries;
}
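
/*
 * Usage sketch (hypothetical): a block driver maps a scatterlist and
 * programs each resulting bus address into its hardware;
 * "program_hw_entry" is an illustrative stand-in for device-specific
 * code:
 *
 *	int n = dma_map_sg(&pdev->dev, sgl, nents, DMA_FROM_DEVICE);
 *	for_each_sg(sgl, sg, n, i)
 *		program_hw_entry(sg_dma_address(sg), sg_dma_len(sg));
 */

/*
 * SN is fully cache coherent with respect to DMA, so the sync entry
 * points below have no flushing or invalidation to do; they only
 * sanity-check the bus type.
 */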
static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				       size_t size, enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
					  size_t size,
					  enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				   int nelems, enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				      int nelems, enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

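/*
 * SN can place memory anywhere in the 64-bit physical address space,
 * so a full 64-bit mask is reported as the required mask.
 */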
u64 sn_dma_get_required_mask(struct device *dev)
{
	return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);

char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
	if (!SN_PCIBUS_BUSSOFT(bus))
		return ERR_PTR(-ENODEV);

	return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET);
}

int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	unsigned long addr;
	int ret;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
	 * around hw issues at the pci bus level. SGI proms older than
	 * 4.10 don't implement this.
	 */

	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 0, /* read */
		 port, size, __pa(val));

	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call which should
	 * be present in all proms (but which cannot work around PCI chipset
	 * bugs). This code is retained for compatibility with old
	 * pre-4.10 proms, and should be removed at some point in the future.
	 */

	if (!SN_PCIBUS_BUSSOFT(bus))
		return -ENODEV;

	addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	addr += port;

	ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);

	if (ret == 2)
		return -EINVAL;

	if (ret == 1)
		*val = -1;

	return size;
}
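
/*
 * Usage sketch (hypothetical): reading one byte from a legacy I/O
 * port, e.g. the keyboard-controller status port at 0x64:
 *
 *	u32 val;
 *
 *	if (sn_pci_legacy_read(bus, 0x64, &val, 1) != 1)
 *		return -EIO;
 */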

int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;
	unsigned long paddr;
	unsigned long *addr;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
	 * around hw issues at the pci bus level. SGI proms older than
	 * 4.10 don't implement this.
	 */

	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 1, /* write */
		 port, size, __pa(&val));

	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call which should
	 * be present in all proms (but which cannot work around PCI chipset
	 * bugs). This code is retained for compatibility with old
	 * pre-4.10 proms, and should be removed at some point in the future.
	 */

	if (!SN_PCIBUS_BUSSOFT(bus)) {
		ret = -ENODEV;
		goto out;
	}

	/* Put the phys addr in uncached space */
	paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	paddr += port;
	addr = (unsigned long *)paddr;

	switch (size) {
	case 1:
		*(volatile u8 *)(addr) = (u8)(val);
		break;
	case 2:
		*(volatile u16 *)(addr) = (u16)(val);
		break;
	case 4:
		*(volatile u32 *)(addr) = (u32)(val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
 out:
	return ret;
}

static struct dma_map_ops sn_dma_ops = {
	.alloc = sn_dma_alloc_coherent,
	.free = sn_dma_free_coherent,
	.map_page = sn_dma_map_page,
	.unmap_page = sn_dma_unmap_page,
	.map_sg = sn_dma_map_sg,
	.unmap_sg = sn_dma_unmap_sg,
	.sync_single_for_cpu = sn_dma_sync_single_for_cpu,
	.sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
	.sync_single_for_device = sn_dma_sync_single_for_device,
	.sync_sg_for_device = sn_dma_sync_sg_for_device,
	.mapping_error = sn_dma_mapping_error,
	.dma_supported = sn_dma_supported,
};

void sn_dma_init(void)
{
	dma_ops = &sn_dma_ops;
}
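
/*
 * Once sn_dma_init() has installed sn_dma_ops as the platform dma_ops,
 * generic DMA API calls such as dma_map_page(), dma_map_sg() and
 * dma_alloc_coherent() are dispatched to the sn_dma_* implementations
 * above; drivers reach them through the generic API rather than by name
 * (only sn_dma_set_mask() and sn_dma_get_required_mask() are exported).
 */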